text
stringlengths
12
1.05M
repo_name
stringlengths
5
86
path
stringlengths
4
191
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
12
1.05M
keyword
listlengths
1
23
text_hash
stringlengths
64
64
# -*- coding: utf-8 -*- # # Copyright (c) 2020, the cclib development team # # This file is part of cclib (http://cclib.github.io) and is distributed under # the terms of the BSD 3-Clause License. """Parser for Q-Chem output files""" import itertools import math import re import datetime import numpy from packaging.version import parse as parse_version, Version from cclib.parser import logfileparser from cclib.parser import utils class QChem(logfileparser.Logfile): """A Q-Chem log file.""" def __init__(self, *args, **kwargs): # Call the __init__ method of the superclass super(QChem, self).__init__(logname="QChem", *args, **kwargs) def __str__(self): """Return a string representation of the object.""" return "QChem log file %s" % (self.filename) def __repr__(self): """Return a representation of the object.""" return 'QChem("%s")' % (self.filename) def normalisesym(self, label): """Q-Chem does not require normalizing symmetry labels.""" return label def before_parsing(self): # Keep track of whether or not we're performing an # (un)restricted calculation. self.unrestricted = False self.is_rohf = False # Keep track of whether or not this is a fragment calculation, # so that only the supersystem is parsed. self.is_fragment_section = False # These headers identify when a fragment section is # entered/exited. self.fragment_section_headers = ( 'Guess MOs from converged MOs on fragments', 'CP correction for fragment', ) self.supersystem_section_headers = ( 'Done with SCF on isolated fragments', 'Done with counterpoise correction on fragments', ) # Compile the dashes-and-or-spaces-only regex. self.re_dashes_and_spaces = re.compile(r'^[\s-]+$') # Compile the regex for extracting the atomic index from an # aoname. 
self.re_atomindex = re.compile(r'(\d+)_') # QChem changed the number of spaces from version 5.1 to 5.2 # D( 35) --> V( 3) amplitude = 0.0644 # S( 1) --> V( 1) amplitude = -0.1628 alpha # D(189) --> S( 1) amplitude = -0.0120 beta self.re_tddft = re.compile(r'([SD])\( *(\d+)\) --> ([VS])\( *(\d+)\) amplitude = *([^ ]*)( (alpha|beta))?') # A maximum of 6 columns per block when printing matrices. The # Fock matrix is 4. self.ncolsblock = 6 # By default, when asked to print orbitals via # `scf_print`/`scf_final_print` and/or `print_orbitals`, Q-Chem will # print all occupieds and the first 5 virtuals for versions prior to # 5.2. # # When the number is set for `print_orbitals`, that section of # the output will display (NOcc + that many virtual) MOs, but # any other sections present due to # `scf_print`/`scf_final_print` will still only display (NOcc # + 5) MOs. It is the `print_orbitals` section that `aonames` # is parsed from. # # Note that the (AO basis) density matrix is always (NBasis * # NBasis)! self.norbdisp_alpha = self.norbdisp_beta = 5 self.norbdisp_alpha_aonames = self.norbdisp_beta_aonames = 5 self.alpha_mo_coefficient_headers = ( 'RESTRICTED (RHF) MOLECULAR ORBITAL COEFFICIENTS', 'ALPHA MOLECULAR ORBITAL COEFFICIENTS' ) self.gradient_headers = ( 'Full Analytical Gradient', 'Gradient of SCF Energy', 'Gradient of MP2 Energy', ) self.hessian_headers = ( 'Hessian of the SCF Energy', 'Final Hessian.', ) self.wfn_method = [ 'HF', 'MP2', 'RI-MP2', 'LOCAL_MP2', 'MP4', 'CCD', 'CCSD', 'CCSD(T)', 'QCISD', 'QCISD(T)' ] def after_parsing(self): # If parsing a fragment job, each of the geometries appended to # `atomcoords` may be of different lengths, which will prevent # conversion from a list to NumPy array. # Take the length of the first geometry as correct, and remove # all others with different lengths. 
        if len(self.atomcoords) > 1:
            correctlen = len(self.atomcoords[0])
            self.atomcoords[:] = [coords for coords in self.atomcoords
                                  if len(coords) == correctlen]
        # At the moment, there is no similar correction for other properties!

        # QChem does not print all MO coefficients by default, but rather
        # up to HOMO+5. So, fill up the missing values with NaNs. If there are
        # other cases where coefficient are missing, but different ones, this
        # general afterthought might not be appropriate and the fix will
        # need to be done while parsing.
        if hasattr(self, 'mocoeffs'):
            for im in range(len(self.mocoeffs)):
                _nmo, _nbasis = self.mocoeffs[im].shape
                if (_nmo, _nbasis) != (self.nmo, self.nbasis):
                    # Pad the parsed block into a full (nmo, nbasis)
                    # array, leaving unparsed entries as NaN.
                    coeffs = numpy.empty((self.nmo, self.nbasis))
                    coeffs[:] = numpy.nan
                    coeffs[0:_nmo, 0:_nbasis] = self.mocoeffs[im]
                    self.mocoeffs[im] = coeffs

        # When parsing the 'MOLECULAR ORBITAL COEFFICIENTS' block for
        # `aonames`, Q-Chem doesn't print the principal quantum number
        # for each shell; this needs to be added.
        if hasattr(self, 'aonames') and hasattr(self, 'atombasis'):
            angmom = ('', 'S', 'P', 'D', 'F', 'G', 'H', 'I')
            for atom in self.atombasis:
                bfcounts = dict()
                for bfindex in atom:
                    atomname, bfname = self.aonames[bfindex].split('_')
                    # Keep track of how many times each shell type has
                    # appeared.
                    if bfname in bfcounts:
                        bfcounts[bfname] += 1
                    else:
                        # Make sure the starting number for type of
                        # angular momentum begins at the appropriate
                        # principal quantum number (1S, 2P, 3D, 4F,
                        # ...).
                        bfcounts[bfname] = angmom.index(bfname[0])
                    newbfname = '{}{}'.format(bfcounts[bfname], bfname)
                    self.aonames[bfindex] = '_'.join([atomname, newbfname])

        # Assign the number of core electrons replaced by ECPs.
        if hasattr(self, 'user_input') and self.user_input.get('rem') is not None:
            if self.user_input['rem'].get('ecp') is not None:
                ecp_is_gen = (self.user_input['rem']['ecp'] == 'gen')
                if ecp_is_gen:
                    assert 'ecp' in self.user_input
                # `possible_ecps` is only parsed when "iprint >= 100"
                # caused the ECP table to be printed.
                has_iprint = hasattr(self, 'possible_ecps')

                if not ecp_is_gen and not has_iprint:
                    msg = """ECPs are present, but the number of core \
electrons isn't printed at all. Rerun with "iprint >= 100" to get \
coreelectrons."""
                    self.logger.warning(msg)
                    self.incorrect_coreelectrons = True
                elif ecp_is_gen and not has_iprint:
                    nmissing = sum(ncore == 0
                                   for (_, _, ncore) in self.user_input['ecp'])
                    if nmissing > 1:
                        msg = """ECPs are present, but coreelectrons can only \
be guessed for one element at most. Rerun with "iprint >= 100" to get \
coreelectrons."""
                        self.logger.warning(msg)
                        self.incorrect_coreelectrons = True
                    elif self.user_input['molecule'].get('charge') is None:
                        msg = """ECPs are present, but the total charge \
cannot be determined. Rerun without `$molecule read`."""
                        self.logger.warning(msg)
                        self.incorrect_coreelectrons = True
                    else:
                        user_charge = self.user_input['molecule']['charge']
                        # First, assign the entries given
                        # explicitly.
                        for entry in self.user_input['ecp']:
                            element, _, ncore = entry
                            if ncore > 0:
                                self._assign_coreelectrons_to_element(element, ncore)
                        # Because of how the charge is calculated
                        # during extract(), this is the number of
                        # remaining core electrons that need to be
                        # assigned ECP centers. Filter out the
                        # remaining entries, of which there should
                        # only be one.
                        core_sum = self.coreelectrons.sum() if hasattr(self, 'coreelectrons') else 0
                        remainder = self.charge - user_charge - core_sum
                        entries = [entry
                                   for entry in self.user_input['ecp']
                                   if entry[2] == 0]
                        if len(entries) != 0:
                            assert len(entries) == 1
                            element, _, ncore = entries[0]
                            assert ncore == 0
                            self._assign_coreelectrons_to_element(
                                element, remainder, ncore_is_total_count=True)
                elif not ecp_is_gen and has_iprint:
                    # Built-in ECPs with the table printed: look up the
                    # core count per element symbol for every atom.
                    atomsymbols = [self.table.element[atomno] for atomno in self.atomnos]
                    for i in range(self.natom):
                        if atomsymbols[i] in self.possible_ecps:
                            self.coreelectrons[i] = self.possible_ecps[atomsymbols[i]]
                else:
                    assert ecp_is_gen and has_iprint
                    for entry in self.user_input['ecp']:
                        element, _, ncore = entry
                        # If ncore is non-zero, then it must be
                        # user-defined, and we take that
                        # value. Otherwise, look it up.
                        if ncore == 0:
                            ncore = self.possible_ecps[element]
                        self._assign_coreelectrons_to_element(element, ncore)

        # Check to see if the charge is consistent with the input
        # section. It may not be if using an ECP.
if hasattr(self, 'user_input'): if self.user_input.get('molecule') is not None: user_charge = self.user_input['molecule'].get('charge') if user_charge is not None: self.set_attribute('charge', user_charge) def parse_charge_section(self, inputfile, chargetype): """Parse the population analysis charge block.""" self.skip_line(inputfile, 'blank') line = next(inputfile) has_spins = False if 'Spin' in line: if not hasattr(self, 'atomspins'): self.atomspins = dict() has_spins = True spins = [] self.skip_line(inputfile, 'dashes') if not hasattr(self, 'atomcharges'): self.atomcharges = dict() charges = [] line = next(inputfile) while list(set(line.strip())) != ['-']: elements = line.split() charge = utils.float(elements[2]) charges.append(charge) if has_spins: spin = utils.float(elements[3]) spins.append(spin) line = next(inputfile) self.atomcharges[chargetype] = numpy.array(charges) if has_spins: self.atomspins[chargetype] = numpy.array(spins) @staticmethod def parse_matrix(inputfile, nrows, ncols, ncolsblock): """Q-Chem prints most matrices in a standard format; parse the matrix into a NumPy array of the appropriate shape. """ nparray = numpy.empty(shape=(nrows, ncols)) line = next(inputfile) assert len(line.split()) == min(ncolsblock, ncols) colcounter = 0 while colcounter < ncols: # If the line is just the column header (indices)... if line[:5].strip() == '': line = next(inputfile) rowcounter = 0 while rowcounter < nrows: row = list(map(float, line.split()[1:])) assert len(row) == min(ncolsblock, (ncols - colcounter)) nparray[rowcounter][colcounter:colcounter + ncolsblock] = row line = next(inputfile) rowcounter += 1 colcounter += ncolsblock return nparray def parse_matrix_aonames(self, inputfile, nrows, ncols): """Q-Chem prints most matrices in a standard format; parse the matrix into a preallocated NumPy array of the appropriate shape. 
Rather than have one routine for parsing all general matrices and the 'MOLECULAR ORBITAL COEFFICIENTS' block, use a second which handles `aonames`. """ bigmom = ('d', 'f', 'g', 'h') nparray = numpy.empty(shape=(nrows, ncols)) line = next(inputfile) assert len(line.split()) == min(self.ncolsblock, ncols) colcounter = 0 split_fixed = utils.WidthSplitter((4, 4, 4, 6, 10, 10, 10, 10, 10, 10)) while colcounter < ncols: # If the line is just the column header (indices)... if line[:5].strip() == '': line = next(inputfile) # Do nothing for now. if 'eigenvalues' in line: line = next(inputfile) rowcounter = 0 while rowcounter < nrows: row = split_fixed.split(line) # Only take the AO names on the first time through. if colcounter == 0: if len(self.aonames) != self.nbasis: # Apply the offset for rows where there is # more than one atom of any element in the # molecule. offset = 1 if row[2] != '': name = self.atommap.get(row[1] + str(row[2])) else: name = self.atommap.get(row[1] + '1') # For l > 1, there is a space between l and # m_l when using spherical functions. shell = row[2 + offset] if shell in bigmom: shell = ''.join([shell, row[3 + offset]]) aoname = ''.join([name, '_', shell.upper()]) self.aonames.append(aoname) row = list(map(float, row[-min(self.ncolsblock, (ncols - colcounter)):])) nparray[rowcounter][colcounter:colcounter + self.ncolsblock] = row line = next(inputfile) rowcounter += 1 colcounter += self.ncolsblock return nparray def parse_orbital_energies_and_symmetries(self, inputfile): """Parse the 'Orbital Energies (a.u.)' block appearing after SCF converges, which optionally includes MO symmetries. Based upon the Occupied/Virtual labeling, the HOMO is also parsed. """ energies = [] symbols = [] line = next(inputfile) # Sometimes Q-Chem gets a little confused... while "MOs" not in line: line = next(inputfile) line = next(inputfile) # The end of the block is either a blank line or only dashes. 
while not self.re_dashes_and_spaces.search(line) \ and not 'Warning : Irrep of orbital' in line: if 'Occupied' in line or 'Virtual' in line: # A nice trick to find where the HOMO is. if 'Virtual' in line: homo = len(energies) - 1 line = next(inputfile) tokens = line.split() # If the line contains letters, it must be the MO # symmetries. Otherwise, it's the energies. if re.search("[a-zA-Z]", line): symbols.extend(tokens[1::2]) else: for e in tokens: try: energy = utils.convertor(utils.float(e), 'hartree', 'eV') except ValueError: energy = numpy.nan energies.append(energy) line = next(inputfile) # MO symmetries are either not present or there is one for each MO # (energy). assert len(symbols) in (0, len(energies)) return energies, symbols, homo def generate_atom_map(self): """Generate the map to go from Q-Chem atom numbering: 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'C7', ... to cclib atom numbering: 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H7', 'H8', 'H9', 'H10', 'C11', ... for later use. """ # Generate the desired order. order_proper = [element + str(num) for element, num in zip(self.atomelements, itertools.count(start=1))] # We need separate counters for each element. element_counters = {element: itertools.count(start=1) for element in set(self.atomelements)} # Generate the Q-Chem printed order. order_qchem = [element + str(next(element_counters[element])) for element in self.atomelements] # Combine the orders into a mapping. atommap = {k: v for k, v, in zip(order_qchem, order_proper)} return atommap def generate_formula_histogram(self): """From the atomnos, generate a histogram that represents the molecular formula. """ histogram = dict() for element in self.atomelements: if element in histogram.keys(): histogram[element] += 1 else: histogram[element] = 1 return histogram def extract(self, inputfile, line): """Extract information from the file object inputfile.""" # Extract the version number and optionally the version # control info. 
if any(version_trigger in line for version_trigger in ("Q-Chem", "Unrecognized platform", "Version")): # Part 1 matches # - `Q-Chem 4.3.0 for Intel X86 EM64T Linux` # Part 2 matches # - `Unrecognized platform!!! 4.0.0.1` # Part 3 matches # - `Intel X86 EM64T Linux Version 4.1.0.1 ` # but not # - `Additional authors for Version 3.1:` # - `Q-Chem, Version 4.1, Q-Chem, Inc., Pittsburgh, PA (2013).` match = re.search( r"Q-Chem\s([\d\.]*)\sfor|" r"Unrecognized platform!!!\s([\d\.]*)\b|" r"Version\s([\d\.]*)\s*$", line ) if match: groups = [s for s in match.groups() if s is not None] assert len(groups) == 1 package_version = groups[0] self.metadata["package_version"] = package_version self.metadata["legacy_package_version"] = package_version self.set_attribute("parsed_svn_revision", False) # Avoid "Last SVN revision" entry. if "SVN revision" in line and "Last" not in line: svn_revision = line.split()[3] line = next(inputfile) svn_branch = line.split()[3].replace("/", "_") if "package_version" in self.metadata \ and hasattr(self, "parsed_svn_revision") \ and not self.parsed_svn_revision: self.metadata["package_version"] = "{}dev+{}-{}".format( self.metadata["package_version"], svn_branch, svn_revision ) parsed_version = parse_version(self.metadata["package_version"]) assert isinstance(parsed_version, Version) self.set_attribute("package_version", parsed_version) self.set_attribute("parsed_svn_revision", True) # Disable/enable parsing for fragment sections. if any(message in line for message in self.fragment_section_headers): self.is_fragment_section = True if any(message in line for message in self.supersystem_section_headers): self.is_fragment_section = False if not self.is_fragment_section: # If the input section is repeated back, parse the $rem and # $molecule sections. 
if line[0:11] == 'User input:': self.user_input = dict() self.skip_line(inputfile, 'd') while list(set(line.strip())) != ['-']: if line.strip().lower() == '$rem': self.user_input['rem'] = dict() while line.strip().lower() != '$end': line = next(inputfile).lower() if line.strip() == '$end': break # Apparently calculations can run without # a matching $end...this terminates the # user input section no matter what. if line.strip() == ('-' * 62): break tokens = line.split() # Allow blank lines. if len(tokens) == 0: continue # Entries may be separated by an equals # sign, and can have comments, for example: # ecp gen # ecp = gen # ecp gen ! only on first chlorine # ecp = gen only on first chlorine assert len(tokens) >= 2 keyword = tokens[0] if tokens[1] == '=': option = tokens[2] else: option = tokens[1] self.user_input['rem'][keyword] = option if keyword == 'method': method = option.upper() if method in self.wfn_method: self.metadata["methods"].append(method) else: self.metadata["methods"].append('DFT') self.metadata["functional"] = method if keyword == 'exchange': self.metadata["methods"].append('DFT') self.metadata["functional"] = option if keyword == 'print_orbitals': # Stay with the default value if a number isn't # specified. if option in ('true', 'false'): continue else: norbdisp_aonames = int(option) self.norbdisp_alpha_aonames = norbdisp_aonames self.norbdisp_beta_aonames = norbdisp_aonames if line.strip().lower() == '$ecp': self.user_input['ecp'] = [] line = next(inputfile) while line.strip().lower() != '$end': while list(set(line.strip())) != ['*']: # Parse the element for this ECP # entry. If only the element is on # this line, or the 2nd token is 0, it # applies to all atoms; if it's > 0, # then it indexes (1-based) that # specific atom in the whole molecule. 
tokens = line.split() assert len(tokens) > 0 element = tokens[0][0].upper() + tokens[0][1:].lower() assert element in self.table.element if len(tokens) > 1: assert len(tokens) == 2 index = int(tokens[1]) - 1 else: index = -1 line = next(inputfile) # Next comes the ECP definition. If # the line contains only a single # item, it's a built-in ECP, otherwise # it's a full definition. tokens = line.split() if len(tokens) == 1: ncore = 0 line = next(inputfile) else: assert len(tokens) == 3 ncore = int(tokens[2]) # Don't parse the remainder of the # ECP definition. while list(set(line.strip())) != ['*']: line = next(inputfile) entry = (element, index, ncore) self.user_input['ecp'].append(entry) line = next(inputfile) if line.strip().lower() == '$end': break if line.strip().lower() == '$molecule': self.user_input['molecule'] = dict() line = next(inputfile) # Don't read the molecule, only the # supersystem charge and multiplicity. if line.split()[0].lower() == 'read': pass else: charge, mult = [int(x) for x in line.split()] self.user_input['molecule']['charge'] = charge self.user_input['molecule']['mult'] = mult line = next(inputfile).lower() # Parse the basis set name if 'Requested basis set' in line: self.metadata["basis_set"] = line.split()[-1] # Parse the general basis for `gbasis`, in the style used by # Gaussian. if 'Basis set in general basis input format:' in line: self.skip_lines(inputfile, ['d', '$basis']) line = next(inputfile) if not hasattr(self, 'gbasis'): self.gbasis = [] # The end of the general basis block. while '$end' not in line: atom = [] # 1. Contains element symbol and atomic index of # basis functions; if 0, applies to all atoms of # same element. assert len(line.split()) == 2 line = next(inputfile) # The end of each atomic block. while '****' not in line: # 2. Contains the type of basis function {S, SP, # P, D, F, G, H, ...}, the number of primitives, # and the weight of the final contracted function. 
bfsplitline = line.split() assert len(bfsplitline) == 3 bftype = bfsplitline[0] nprim = int(bfsplitline[1]) line = next(inputfile) # 3. The primitive basis functions that compose # the contracted basis function; there are `nprim` # of them. The first value is the exponent, and # the second value is the contraction # coefficient. If `bftype == 'SP'`, the primitives # are for both S- and P-type basis functions but # with separate contraction coefficients, # resulting in three columns. if bftype == 'SP': primitives_S = [] primitives_P = [] else: primitives = [] # For each primitive in the contracted basis # function... for iprim in range(nprim): primsplitline = line.split() exponent = float(primsplitline[0]) if bftype == 'SP': assert len(primsplitline) == 3 coefficient_S = float(primsplitline[1]) coefficient_P = float(primsplitline[2]) primitives_S.append((exponent, coefficient_S)) primitives_P.append((exponent, coefficient_P)) else: assert len(primsplitline) == 2 coefficient = float(primsplitline[1]) primitives.append((exponent, coefficient)) line = next(inputfile) if bftype == 'SP': bf_S = ('S', primitives_S) bf_P = ('P', primitives_P) atom.append(bf_S) atom.append(bf_P) else: bf = (bftype, primitives) atom.append(bf) # Move to the next contracted basis function # as long as we don't hit the '****' atom # delimiter. self.gbasis.append(atom) line = next(inputfile) if line.strip() == 'The following effective core potentials will be applied': # Keep track of all elements that may have an ECP on # them. *Which* centers have an ECP can't be # determined here, so just take the number of valence # electrons, then later later figure out the centers # and do core = Z - valence. self.possible_ecps = dict() # This will fail if an element has more than one kind # of ECP. 
split_fixed = utils.WidthSplitter((4, 13, 20, 2, 14, 14)) self.skip_lines(inputfile, ['d', 'header', 'header', 'd']) line = next(inputfile) while list(set(line.strip())) != ['-']: tokens = split_fixed.split(line) if tokens[0] != '': element = tokens[0] valence = int(tokens[1]) ncore = self.table.number[element] - valence self.possible_ecps[element] = ncore line = next(inputfile) if 'TIME STEP #' in line: tokens = line.split() self.append_attribute('time', float(tokens[8])) if line.strip() == "Adding empirical dispersion correction": while "energy" not in line: line = next(inputfile) self.append_attribute( "dispersionenergies", utils.convertor(utils.float(line.split()[-2]), "hartree", "eV") ) # Extract the atomic numbers and coordinates of the atoms. if 'Standard Nuclear Orientation' in line: if "Angstroms" in line: convertor = lambda x: x elif 'Bohr' in line: convertor = lambda x: utils.convertor(x, 'bohr', 'Angstrom') else: raise ValueError("Unknown units in coordinate header: {}".format(line)) self.skip_lines(inputfile, ['cols', 'dashes']) atomelements = [] atomcoords = [] line = next(inputfile) while list(set(line.strip())) != ['-']: entry = line.split() atomelements.append(entry[1]) atomcoords.append([convertor(float(value)) for value in entry[2:]]) line = next(inputfile) self.append_attribute('atomcoords', atomcoords) # We calculate and handle atomnos no matter what, since in # the case of fragment calculations the atoms may change, # along with the charge and spin multiplicity. self.atomnos = [] self.atomelements = [] for atomelement in atomelements: self.atomelements.append(atomelement) if atomelement == 'GH': self.atomnos.append(0) else: self.atomnos.append(self.table.number[atomelement]) self.natom = len(self.atomnos) self.atommap = self.generate_atom_map() self.formula_histogram = self.generate_formula_histogram() # Number of electrons. # Useful for determining the number of occupied/virtual orbitals. 
if 'Nuclear Repulsion Energy' in line: line = next(inputfile) nelec_re_string = r'There are(\s+[0-9]+) alpha and(\s+[0-9]+) beta electrons' match = re.findall(nelec_re_string, line.strip()) self.set_attribute('nalpha', int(match[0][0].strip())) self.set_attribute('nbeta', int(match[0][1].strip())) self.norbdisp_alpha += self.nalpha self.norbdisp_alpha_aonames += self.nalpha self.norbdisp_beta += self.nbeta self.norbdisp_beta_aonames += self.nbeta # Calculate the spin multiplicity (2S + 1), where S is the # total spin of the system. S = (self.nalpha - self.nbeta) / 2 mult = int(2 * S + 1) self.set_attribute('mult', mult) # Calculate the molecular charge as the difference between # the atomic numbers and the number of electrons. if hasattr(self, 'atomnos'): charge = sum(self.atomnos) - (self.nalpha + self.nbeta) self.set_attribute('charge', charge) # Number of basis functions. if 'basis functions' in line: if not hasattr(self, 'nbasis'): self.set_attribute('nbasis', int(line.split()[-3])) # In the case that there are fewer basis functions # (and therefore MOs) than default number of MOs # displayed, reset the display values. self.norbdisp_alpha = min(self.norbdisp_alpha, self.nbasis) self.norbdisp_alpha_aonames = min(self.norbdisp_alpha_aonames, self.nbasis) self.norbdisp_beta = min(self.norbdisp_beta, self.nbasis) self.norbdisp_beta_aonames = min(self.norbdisp_beta_aonames, self.nbasis) # Finally, versions of Q-Chem greater than 5.1.2 print all MOs in # the "Final <Spin> MO Coefficients" blocks, but *not* the # "MOLECULAR ORBITAL COEFFICIENTS" blocks. if hasattr(self, "package_version"): pv = self.package_version if pv.major >= 5 and pv.minor > 1: norbdisp = None if hasattr(self, "nmo"): norbdisp = self.nmo elif hasattr(self, "nbasis"): norbdisp = self.nbasis if norbdisp is not None: self.norbdisp_alpha = norbdisp self.norbdisp_beta = norbdisp # Check for whether or not we're peforming an # (un)restricted calculation. 
if 'calculation will be' in line: if ' restricted' in line: self.unrestricted = False if 'unrestricted' in line: self.unrestricted = True if hasattr(self, 'nalpha') and hasattr(self, 'nbeta'): if self.nalpha != self.nbeta: self.unrestricted = True self.is_rohf = True # Section with SCF iterations goes like this: # # SCF converges when DIIS error is below 1.0E-05 # --------------------------------------- # Cycle Energy DIIS Error # --------------------------------------- # 1 -381.9238072190 1.39E-01 # 2 -382.2937212775 3.10E-03 # 3 -382.2939780242 3.37E-03 # ... # scf_success_messages = ( 'Convergence criterion met', 'corrected energy' ) scf_failure_messages = ( 'SCF failed to converge', 'Convergence failure' ) if 'SCF converges when ' in line: if not hasattr(self, 'scftargets'): self.scftargets = [] target = float(line.split()[-1]) self.scftargets.append([target]) # We should have the header between dashes now, # but sometimes there are lines before the first dashes. while not 'Cycle Energy' in line: line = next(inputfile) self.skip_line(inputfile, 'd') values = [] iter_counter = 1 line = next(inputfile) while not any(message in line for message in scf_success_messages): # Some trickery to avoid a lot of printing that can occur # between each SCF iteration. entry = line.split() if len(entry) > 0: if entry[0] == str(iter_counter): # Q-Chem only outputs one error metric. error = float(entry[2]) values.append([error]) iter_counter += 1 try: line = next(inputfile) # Is this the end of the file for some reason? except StopIteration: self.logger.warning('File terminated before end of last SCF! Last error: {}'.format(error)) break # We've converged, but still need the last iteration. if any(message in line for message in scf_success_messages): entry = line.split() error = float(entry[2]) values.append([error]) iter_counter += 1 # This is printed in regression QChem4.2/dvb_sp_unconverged.out # so use it to bail out when convergence fails. 
if any(message in line for message in scf_failure_messages): break if not hasattr(self, 'scfvalues'): self.scfvalues = [] self.scfvalues.append(numpy.array(values)) # Molecular orbital coefficients. # Try parsing them from this block (which comes from # `scf_final_print = 2``) rather than the combined # aonames/mocoeffs/moenergies block (which comes from # `print_orbitals = true`). if 'Final Alpha MO Coefficients' in line: if not hasattr(self, 'mocoeffs'): self.mocoeffs = [] mocoeffs = QChem.parse_matrix(inputfile, self.nbasis, self.norbdisp_alpha, self.ncolsblock) self.mocoeffs.append(mocoeffs.transpose()) if 'Final Beta MO Coefficients' in line: mocoeffs = QChem.parse_matrix(inputfile, self.nbasis, self.norbdisp_beta, self.ncolsblock) self.mocoeffs.append(mocoeffs.transpose()) if 'Total energy in the final basis set' in line: if not hasattr(self, 'scfenergies'): self.scfenergies = [] scfenergy = float(line.split()[-1]) self.scfenergies.append(utils.convertor(scfenergy, 'hartree', 'eV')) # Geometry optimization. if 'Maximum Tolerance Cnvgd?' in line: line_g = next(inputfile).split()[1:3] line_d = next(inputfile).split()[1:3] line_e = next(inputfile).split()[2:4] if not hasattr(self, 'geotargets'): self.geotargets = [line_g[1], line_d[1], utils.float(line_e[1])] if not hasattr(self, 'geovalues'): self.geovalues = [] maxg = utils.float(line_g[0]) maxd = utils.float(line_d[0]) ediff = utils.float(line_e[0]) geovalues = [maxg, maxd, ediff] self.geovalues.append(geovalues) if '** OPTIMIZATION CONVERGED **' in line: if not hasattr(self, 'optdone'): self.optdone = [] self.optdone.append(len(self.atomcoords)) if '** MAXIMUM OPTIMIZATION CYCLES REACHED **' in line: if not hasattr(self, 'optdone'): self.optdone = [] # Moller-Plesset corrections. # There are multiple modules in Q-Chem for calculating MPn energies: # cdman, ccman, and ccman2, all with different output. 
# # MP2, RI-MP2, and local MP2 all default to cdman, which has a simple # block of output after the regular SCF iterations. # # MP3 is handled by ccman2. # # MP4 and variants are handled by ccman. # This is the MP2/cdman case. if 'MP2 total energy' in line: if not hasattr(self, 'mpenergies'): self.mpenergies = [] mp2energy = float(line.split()[4]) mp2energy = utils.convertor(mp2energy, 'hartree', 'eV') self.mpenergies.append([mp2energy]) # This is the MP3/ccman2 case. if line[1:11] == 'MP2 energy' and line[12:19] != 'read as': if not hasattr(self, 'mpenergies'): self.mpenergies = [] mpenergies = [] mp2energy = float(line.split()[3]) mpenergies.append(mp2energy) line = next(inputfile) line = next(inputfile) # Just a safe check. if 'MP3 energy' in line: mp3energy = float(line.split()[3]) mpenergies.append(mp3energy) mpenergies = [utils.convertor(mpe, 'hartree', 'eV') for mpe in mpenergies] self.mpenergies.append(mpenergies) # This is the MP4/ccman case. if 'EHF' in line: if not hasattr(self, 'mpenergies'): self.mpenergies = [] mpenergies = [] while list(set(line.strip())) != ['-']: if 'EMP2' in line: mp2energy = float(line.split()[2]) mpenergies.append(mp2energy) if 'EMP3' in line: mp3energy = float(line.split()[2]) mpenergies.append(mp3energy) if 'EMP4SDQ' in line: mp4sdqenergy = float(line.split()[2]) mpenergies.append(mp4sdqenergy) # This is really MP4SD(T)Q. if 'EMP4 ' in line: mp4sdtqenergy = float(line.split()[2]) mpenergies.append(mp4sdtqenergy) line = next(inputfile) mpenergies = [utils.convertor(mpe, 'hartree', 'eV') for mpe in mpenergies] self.mpenergies.append(mpenergies) # Coupled cluster corrections. # Hopefully we only have to deal with ccman2 here. 
if 'CCD total energy' in line: if not hasattr(self, 'ccenergies'): self.ccenergies = [] ccdenergy = float(line.split()[-1]) ccdenergy = utils.convertor(ccdenergy, 'hartree', 'eV') self.ccenergies.append(ccdenergy) if 'CCSD total energy' in line: has_triples = False if not hasattr(self, 'ccenergies'): self.ccenergies = [] ccsdenergy = float(line.split()[-1]) # Make sure we aren't actually doing CCSD(T). line = next(inputfile) line = next(inputfile) if 'CCSD(T) total energy' in line: has_triples = True ccsdtenergy = float(line.split()[-1]) ccsdtenergy = utils.convertor(ccsdtenergy, 'hartree', 'eV') self.ccenergies.append(ccsdtenergy) if not has_triples: ccsdenergy = utils.convertor(ccsdenergy, 'hartree', 'eV') self.ccenergies.append(ccsdenergy) if line[:11] == " CCSD T1^2": t1_squared = float(line.split()[3]) t1_norm = math.sqrt(t1_squared) self.metadata["t1_diagnostic"] = t1_norm / math.sqrt(2 * (self.nalpha + self.nbeta)) # Electronic transitions. Works for both CIS and TDDFT. if 'Excitation Energies' in line: # Restricted: # --------------------------------------------------- # TDDFT/TDA Excitation Energies # --------------------------------------------------- # # Excited state 1: excitation energy (eV) = 3.6052 # Total energy for state 1: -382.167872200685 # Multiplicity: Triplet # Trans. Mom.: 0.0000 X 0.0000 Y 0.0000 Z # Strength : 0.0000 # D( 33) --> V( 3) amplitude = 0.2618 # D( 34) --> V( 2) amplitude = 0.2125 # D( 35) --> V( 1) amplitude = 0.9266 # # Unrestricted: # Excited state 2: excitation energy (eV) = 2.3156 # Total energy for state 2: -381.980177630969 # <S**2> : 0.7674 # Trans. 
Mom.: -2.7680 X -0.1089 Y 0.0000 Z # Strength : 0.4353 # S( 1) --> V( 1) amplitude = -0.3105 alpha # D( 34) --> S( 1) amplitude = 0.9322 beta self.skip_lines(inputfile, ['dashes', 'blank']) line = next(inputfile) etenergies = [] etsyms = [] etoscs = [] etsecs = [] spinmap = {'alpha': 0, 'beta': 1} while list(set(line.strip())) != ['-']: # Take the total energy for the state and subtract from the # ground state energy, rather than just the EE; # this will be more accurate. if 'Total energy for state' in line: energy = utils.convertor(float(line.split()[5]), 'hartree', 'wavenumber') etenergy = energy - utils.convertor(self.scfenergies[-1], 'eV', 'wavenumber') etenergies.append(etenergy) # if 'excitation energy' in line: # etenergy = utils.convertor(float(line.split()[-1]), 'eV', 'wavenumber') # etenergies.append(etenergy) if 'Multiplicity' in line: etsym = line.split()[1] etsyms.append(etsym) if 'Strength' in line: strength = float(line.split()[-1]) etoscs.append(strength) # This is the list of transitions. if 'amplitude' in line: sec = [] while line.strip() != '': re_match = self.re_tddft.search(line) if self.unrestricted: spin = spinmap[re_match.group(7)] else: spin = 0 # There is a subtle difference between TDA and RPA calcs, # because in the latter case each transition line is # preceeded by the type of vector: X or Y, name excitation # or deexcitation (see #154 for details). For deexcitations, # we will need to reverse the MO indices. Note also that Q-Chem # starts reindexing virtual orbitals at 1. if line[5] == '(': ttype = 'X' else: assert line[5] == ":" ttype = line[4] # get start and end indices of contribution # as the numbers written in parentheses: index_pattern = re.compile(r"\(( *\d+)\)") indices=index_pattern.findall(line) #assert len(indices)==2 # there must always be a 'start' and 'end' index. if self.unrestricted: # Here are three different countings: # The 'D'oubly occupied orbitals, # the 'S'ingly occupied (i.e. 
index > self.nbeta) and # the 'V'irtual orbitals (index > self.nalpha) # from or to which the excitation can go: # this is supposed to be the standard case: n_minor=self.nbeta n_major=self.nalpha # but this also can appear if self.nbeta > self.nalpha: n_minor=self.nalpha n_major=self.nbeta # split 'line' by '(' to get three strings due to double occurence of '('. # From the first and second string (i.e. before the parentheses), take the last character. if re_match.group(1) == "D": startidx = int(indices[0]) - 1 elif re_match.group(1) == "S": startidx = int(indices[0]) - 1 + n_minor assert startidx < n_major else: startidx=-15 assert "invalid from_occ" if re_match.group(3) == "S": endidx = int(indices[1]) - 1 + n_minor assert endidx < n_major elif re_match.group(3) == "V": endidx = int(indices[1]) - 1 + n_major else: assert "invalid to_occ" else: startidx = int(re_match.group(2)) - 1 endidx = int(re_match.group(4)) - 1 + self.nalpha contrib = float(re_match.group(5)) start = (startidx, spin) end = (endidx, spin) if ttype == 'X': sec.append([start, end, contrib]) elif ttype == 'Y': sec.append([end, start, contrib]) else: raise ValueError('Unknown transition type: %s' % ttype) line = next(inputfile) etsecs.append(sec) line = next(inputfile) self.set_attribute('etenergies', etenergies) self.set_attribute('etsyms', etsyms) self.set_attribute('etoscs', etoscs) self.set_attribute('etsecs', etsecs) # Static and dynamic polarizability from mopropman. if 'Polarizability (a.u.)' in line: if not hasattr(self, 'polarizabilities'): self.polarizabilities = [] while 'Full Tensor' not in line: line = next(inputfile) self.skip_line(inputfile, 'blank') polarizability = [next(inputfile).split() for _ in range(3)] self.polarizabilities.append(numpy.array(polarizability)) # Static polarizability from finite difference or # responseman. 
if line.strip() in ('Static polarizability tensor [a.u.]', 'Polarizability tensor [a.u.]'): if not hasattr(self, 'polarizabilities'): self.polarizabilities = [] polarizability = [next(inputfile).split() for _ in range(3)] self.polarizabilities.append(numpy.array(polarizability)) # Molecular orbital energies and symmetries. if line.strip() == 'Orbital Energies (a.u.) and Symmetries': # -------------------------------------------------------------- # Orbital Energies (a.u.) and Symmetries # -------------------------------------------------------------- # # Alpha MOs, Restricted # -- Occupied -- # -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005 # 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag # -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585 # 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag # -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397 # 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag # -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263 # 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg # -0.216 -0.198 -0.160 # 2 Au 2 Bg 3 Bg # -- Virtual -- # 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365 # 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu # 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539 # 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag # 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806 # 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag # 0.816 # 25 Bu # # Beta MOs, Restricted # -- Occupied -- # -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005 # 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag # -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585 # 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag # -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397 # 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag # -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263 # 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg # -0.216 -0.198 -0.160 # 2 Au 2 Bg 3 Bg # -- Virtual -- # 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365 # 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu 
# 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539 # 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag # 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806 # 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag # 0.816 # 25 Bu # -------------------------------------------------------------- self.skip_line(inputfile, 'dashes') line = next(inputfile) energies_alpha, symbols_alpha, homo_alpha = self.parse_orbital_energies_and_symmetries(inputfile) # Only look at the second block if doing an unrestricted calculation. # This might be a problem for ROHF/ROKS. if self.unrestricted: energies_beta, symbols_beta, homo_beta = self.parse_orbital_energies_and_symmetries(inputfile) # For now, only keep the last set of MO energies, even though it is # printed at every step of geometry optimizations and fragment jobs. self.set_attribute('moenergies', [numpy.array(energies_alpha)]) self.set_attribute('homos', [homo_alpha]) self.set_attribute('mosyms', [symbols_alpha]) if self.unrestricted: self.moenergies.append(numpy.array(energies_beta)) self.homos.append(homo_beta) self.mosyms.append(symbols_beta) self.set_attribute('nmo', len(self.moenergies[0])) # Molecular orbital energies, no symmetries. if line.strip() == 'Orbital Energies (a.u.)': # In the case of no orbital symmetries, the beta spin block is not # present for restricted calculations. # -------------------------------------------------------------- # Orbital Energies (a.u.) 
# -------------------------------------------------------------- # # Alpha MOs # -- Occupied -- # ******* -38.595 -34.580 -34.579 -34.578 -19.372 -19.372 -19.364 # -19.363 -19.362 -19.362 -4.738 -3.252 -3.250 -3.250 -1.379 # -1.371 -1.369 -1.365 -1.364 -1.362 -0.859 -0.855 -0.849 # -0.846 -0.840 -0.836 -0.810 -0.759 -0.732 -0.729 -0.704 # -0.701 -0.621 -0.610 -0.595 -0.587 -0.584 -0.578 -0.411 # -0.403 -0.355 -0.354 -0.352 # -- Virtual -- # -0.201 -0.117 -0.099 -0.086 0.020 0.031 0.055 0.067 # 0.075 0.082 0.086 0.092 0.096 0.105 0.114 0.148 # # Beta MOs # -- Occupied -- # ******* -38.561 -34.550 -34.549 -34.549 -19.375 -19.375 -19.367 # -19.367 -19.365 -19.365 -4.605 -3.105 -3.103 -3.102 -1.385 # -1.376 -1.376 -1.371 -1.370 -1.368 -0.863 -0.858 -0.853 # -0.849 -0.843 -0.839 -0.818 -0.765 -0.738 -0.737 -0.706 # -0.702 -0.624 -0.613 -0.600 -0.591 -0.588 -0.585 -0.291 # -0.291 -0.288 -0.275 # -- Virtual -- # -0.139 -0.122 -0.103 0.003 0.014 0.049 0.049 0.059 # 0.061 0.070 0.076 0.081 0.086 0.090 0.098 0.106 # 0.138 # -------------------------------------------------------------- self.skip_line(inputfile, 'dashes') line = next(inputfile) energies_alpha, _, homo_alpha = self.parse_orbital_energies_and_symmetries(inputfile) # Only look at the second block if doing an unrestricted calculation. # This might be a problem for ROHF/ROKS. if self.unrestricted: energies_beta, _, homo_beta = self.parse_orbital_energies_and_symmetries(inputfile) # For now, only keep the last set of MO energies, even though it is # printed at every step of geometry optimizations and fragment jobs. self.set_attribute('moenergies', [numpy.array(energies_alpha)]) self.set_attribute('homos', [homo_alpha]) if self.unrestricted: self.moenergies.append(numpy.array(energies_beta)) self.homos.append(homo_beta) self.set_attribute('nmo', len(self.moenergies[0])) # Molecular orbital coefficients. # This block comes from `print_orbitals = true/{int}`. 
Less # precision than `scf_final_print >= 2` for `mocoeffs`, but # important for `aonames` and `atombasis`. if any(header in line for header in self.alpha_mo_coefficient_headers): # If we've asked to display more virtual orbitals than # there are MOs present in the molecule, fix that now. if hasattr(self, 'nmo') and hasattr(self, 'nalpha') and hasattr(self, 'nbeta'): self.norbdisp_alpha_aonames = min(self.norbdisp_alpha_aonames, self.nmo) self.norbdisp_beta_aonames = min(self.norbdisp_beta_aonames, self.nmo) if not hasattr(self, 'mocoeffs'): self.mocoeffs = [] if not hasattr(self, 'atombasis'): self.atombasis = [] for n in range(self.natom): self.atombasis.append([]) if not hasattr(self, 'aonames'): self.aonames = [] # We could also attempt to parse `moenergies` here, but # nothing is gained by it. mocoeffs = self.parse_matrix_aonames(inputfile, self.nbasis, self.norbdisp_alpha_aonames) # Only use these MO coefficients if we don't have them # from `scf_final_print`. if len(self.mocoeffs) == 0: self.mocoeffs.append(mocoeffs.transpose()) # Go back through `aonames` to create `atombasis`. assert len(self.aonames) == self.nbasis for aoindex, aoname in enumerate(self.aonames): atomindex = int(self.re_atomindex.search(aoname).groups()[0]) - 1 self.atombasis[atomindex].append(aoindex) assert len(self.atombasis) == len(self.atomnos) if 'BETA MOLECULAR ORBITAL COEFFICIENTS' in line: mocoeffs = self.parse_matrix_aonames(inputfile, self.nbasis, self.norbdisp_beta_aonames) if len(self.mocoeffs) == 1: self.mocoeffs.append(mocoeffs.transpose()) # Population analysis. if 'Ground-State Mulliken Net Atomic Charges' in line: self.parse_charge_section(inputfile, 'mulliken') if 'Hirshfeld Atomic Charges' in line: self.parse_charge_section(inputfile, 'hirshfeld') if 'Ground-State ChElPG Net Atomic Charges' in line: self.parse_charge_section(inputfile, 'chelpg') # Multipole moments are not printed in lexicographical order, # so we need to parse and sort them. 
The units seem OK, but there # is some uncertainty about the reference point and whether it # can be changed. # # Notice how the letter/coordinate labels change to coordinate ranks # after hexadecapole moments, and need to be translated. Additionally, # after 9-th order moments the ranks are not necessarily single digits # and so there are spaces between them. # # ----------------------------------------------------------------- # Cartesian Multipole Moments # LMN = < X^L Y^M Z^N > # ----------------------------------------------------------------- # Charge (ESU x 10^10) # 0.0000 # Dipole Moment (Debye) # X 0.0000 Y 0.0000 Z 0.0000 # Tot 0.0000 # Quadrupole Moments (Debye-Ang) # XX -50.9647 XY -0.1100 YY -50.1441 # XZ 0.0000 YZ 0.0000 ZZ -58.5742 # ... # 5th-Order Moments (Debye-Ang^4) # 500 0.0159 410 -0.0010 320 0.0005 # 230 0.0000 140 0.0005 050 0.0012 # ... # ----------------------------------------------------------------- # if "Cartesian Multipole Moments" in line: # This line appears not by default, but only when # `multipole_order` > 4: line = inputfile.next() if 'LMN = < X^L Y^M Z^N >' in line: line = inputfile.next() # The reference point is always the origin, although normally the molecule # is moved so that the center of charge is at the origin. self.reference = [0.0, 0.0, 0.0] self.moments = [self.reference] # Watch out! This charge is in statcoulombs without the exponent! # We should expect very good agreement, however Q-Chem prints # the charge only with 5 digits, so expect 1e-4 accuracy. charge_header = inputfile.next() assert charge_header.split()[0] == "Charge" charge = float(inputfile.next().strip()) charge = utils.convertor(charge, 'statcoulomb', 'e') * 1e-10 # Allow this to change until fragment jobs are properly implemented. # assert abs(charge - self.charge) < 1e-4 # This will make sure Debyes are used (not sure if it can be changed). 
line = inputfile.next() assert line.strip() == "Dipole Moment (Debye)" while "-----" not in line: # The current multipole element will be gathered here. multipole = [] line = inputfile.next() while ("-----" not in line) and ("Moment" not in line): cols = line.split() # The total (norm) is printed for dipole but not other multipoles. if cols[0] == 'Tot': line = inputfile.next() continue # Find and replace any 'stars' with NaN before moving on. for i in range(len(cols)): if '***' in cols[i]: cols[i] = numpy.nan # The moments come in pairs (label followed by value) up to the 9-th order, # although above hexadecapoles the labels are digits representing the rank # in each coordinate. Above the 9-th order, ranks are not always single digits, # so there are spaces between them, which means moments come in quartets. if len(self.moments) < 5: for i in range(len(cols)//2): lbl = cols[2*i] m = cols[2*i + 1] multipole.append([lbl, m]) elif len(self.moments) < 10: for i in range(len(cols)//2): lbl = cols[2*i] lbl = 'X'*int(lbl[0]) + 'Y'*int(lbl[1]) + 'Z'*int(lbl[2]) m = cols[2*i + 1] multipole.append([lbl, m]) else: for i in range(len(cols)//4): lbl = 'X'*int(cols[4*i]) + 'Y'*int(cols[4*i + 1]) + 'Z'*int(cols[4*i + 2]) m = cols[4*i + 3] multipole.append([lbl, m]) line = inputfile.next() # Sort should use the first element when sorting lists, # so this should simply work, and afterwards we just need # to extract the second element in each list (the actual moment). multipole.sort() multipole = [m[1] for m in multipole] self.moments.append(multipole) # For `method = force` or geometry optimizations, # the gradient is printed. if any(header in line for header in self.gradient_headers): if not hasattr(self, 'grads'): self.grads = [] if 'SCF' in line: ncolsblock = self.ncolsblock else: ncolsblock = 5 grad = QChem.parse_matrix(inputfile, 3, self.natom, ncolsblock) self.grads.append(grad.T) # (Static) polarizability from frequency calculations. 
if 'Polarizability Matrix (a.u.)' in line: if not hasattr(self, 'polarizabilities'): self.polarizabilities = [] polarizability = [] self.skip_line(inputfile, 'index header') for _ in range(3): line = next(inputfile) ss = line.strip()[1:] polarizability.append([ss[0:12], ss[13:24], ss[25:36]]) # For some reason the sign is inverted. self.polarizabilities.append(-numpy.array(polarizability, dtype=float)) # For IR-related jobs, the Hessian is printed (dim: 3*natom, 3*natom). # Note that this is *not* the mass-weighted Hessian. if any(header in line for header in self.hessian_headers): dim = 3*self.natom self.set_attribute( "hessian", QChem.parse_matrix(inputfile, dim, dim, self.ncolsblock) ) # Start of the IR/Raman frequency section. if 'VIBRATIONAL ANALYSIS' in line: vibfreqs = [] vibfconsts = [] vibrmasses = [] vibirs = [] vibramans = [] vibdisps = [] while 'STANDARD THERMODYNAMIC QUANTITIES' not in line: ## IR, optional Raman: # # ********************************************************************** # ** ** # ** VIBRATIONAL ANALYSIS ** # ** -------------------- ** # ** ** # ** VIBRATIONAL FREQUENCIES (CM**-1) AND NORMAL MODES ** # ** FORCE CONSTANTS (mDYN/ANGSTROM) AND REDUCED MASSES (AMU) ** # ** INFRARED INTENSITIES (KM/MOL) ** ##** RAMAN SCATTERING ACTIVITIES (A**4/AMU) AND DEPOLARIZATION RATIOS ** # ** ** # ********************************************************************** # # # Mode: 1 2 3 # Frequency: -106.88 -102.91 161.77 # Force Cnst: 0.0185 0.0178 0.0380 # Red. Mass: 2.7502 2.8542 2.4660 # IR Active: NO YES YES # IR Intens: 0.000 0.000 0.419 # Raman Active: YES NO NO ##Raman Intens: 2.048 0.000 0.000 ##Depolar: 0.750 0.000 0.000 # X Y Z X Y Z X Y Z # C 0.000 0.000 -0.100 -0.000 0.000 -0.070 -0.000 -0.000 -0.027 # C 0.000 0.000 0.045 -0.000 0.000 -0.074 0.000 -0.000 -0.109 # C 0.000 0.000 0.148 -0.000 -0.000 -0.074 0.000 0.000 -0.121 # (...) 
# H -0.000 -0.000 0.422 -0.000 -0.000 0.499 0.000 0.000 -0.285 # TransDip 0.000 -0.000 -0.000 0.000 -0.000 -0.000 -0.000 0.000 0.021 # # Mode: 4 5 6 # ... # # There isn't any symmetry information for normal modes present # in Q-Chem. # if not hasattr(self, 'vibsyms'): # self.vibsyms = [] if 'Frequency:' in line: vibfreqs.extend(map(float, line.split()[1:])) if 'Force Cnst:' in line: vibfconsts.extend(map(float, line.split()[2:])) if 'Red. Mass' in line: vibrmasses.extend(map(float, line.split()[2:])) if 'IR Intens:' in line: vibirs.extend(map(float, line.split()[2:])) if 'Raman Intens:' in line: vibramans.extend(map(float, line.split()[2:])) # This is the start of the displacement block. if line.split()[0:3] == ['X', 'Y', 'Z']: disps = [] for k in range(self.natom): line = next(inputfile) numbers = list(map(float, line.split()[1:])) N = len(numbers) // 3 if not disps: for n in range(N): disps.append([]) for n in range(N): disps[n].append(numbers[3*n:(3*n)+3]) vibdisps.extend(disps) line = next(inputfile) # Anharmonic vibrational analysis. # Q-Chem includes 3 theories: VPT2, TOSH, and VCI. # For now, just take the VPT2 results. # if 'VIBRATIONAL ANHARMONIC ANALYSIS' in line: # while list(set(line.strip())) != ['=']: # if 'VPT2' in line: # if not hasattr(self, 'vibanharms'): # self.vibanharms = [] # self.vibanharms.append(float(line.split()[-1])) # line = next(inputfile) if vibfreqs: self.set_attribute("vibfreqs", vibfreqs) if vibfconsts: self.set_attribute("vibfconsts", vibfconsts) if vibrmasses: self.set_attribute("vibrmasses", vibrmasses) if vibirs: self.set_attribute("vibirs", vibirs) if vibramans: self.set_attribute("vibramans", vibramans) if vibdisps: self.set_attribute("vibdisps", vibdisps) if 'STANDARD THERMODYNAMIC QUANTITIES AT' in line: if not hasattr(self, 'temperature'): self.temperature = float(line.split()[4]) # Not supported yet. 
if not hasattr(self, 'pressure'): self.pressure = float(line.split()[7]) self.skip_line(inputfile, 'blank') line = next(inputfile) if self.natom == 1: assert 'Translational Enthalpy' in line else: assert 'Imaginary Frequencies' in line line = next(inputfile) # Not supported yet. assert 'Zero point vibrational energy' in line if not hasattr(self, 'zpe'): # Convert from kcal/mol to Hartree/particle. self.zpve = utils.convertor(float(line.split()[4]), 'kcal/mol', 'hartree') atommasses = [] while 'Translational Enthalpy' not in line: if 'Has Mass' in line: atommass = float(line.split()[6]) atommasses.append(atommass) line = next(inputfile) if not hasattr(self, 'atommasses'): self.atommasses = numpy.array(atommasses) while line.strip(): line = next(inputfile) line = next(inputfile) assert 'Total Enthalpy' in line if not hasattr(self, 'enthalpy'): enthalpy = float(line.split()[2]) self.enthalpy = utils.convertor(enthalpy, 'kcal/mol', 'hartree') line = next(inputfile) assert 'Total Entropy' in line if not hasattr(self, 'entropy'): entropy = float(line.split()[2]) / 1000 # This is the *temperature dependent* entropy. self.entropy = utils.convertor(entropy, 'kcal/mol', 'hartree') if not hasattr(self, 'freeenergy'): self.freeenergy = self.enthalpy - self.entropy * self.temperature # Extract total elapsed (wall) and CPU job times if line[:16] == ' Total job time:': self.metadata['success'] = True # create empty list for the times to be stored in if not "wall_time" in self.metadata: self.metadata['wall_time'] = [] if not "cpu_time" in self.metadata: self.metadata['cpu_time'] = [] # the line format is " Total job time: 120.37s(wall), 2251.02s(cpu)" at the end of each job ran. 
# first split the line by white space try: a = line.split() # next split the second to last entry at the 's' to pull wall time # cast as a float for use in timedelta data structure wall_td = datetime.timedelta(seconds=float(a[-2].split('s')[0])) # next split the last entry at the 's' to pull cpu time # cast as a float for use in timedelta data structure cpu_td = datetime.timedelta(seconds=float(a[-1].split('s')[0])) self.metadata['wall_time'].append(wall_td) self.metadata['cpu_time'].append(cpu_td) except: pass
berquist/cclib
cclib/parser/qchemparser.py
Python
bsd-3-clause
81,907
[ "Gaussian", "Q-Chem", "cclib" ]
9af65cf8f57a4d6427829b4abc53b41d172397c519a2565aacc1c546c3e38dd2
# -*- coding: iso-8859-1 -*- from array import array import sys # Tant qu'à faire vu qu'ici le nombre de caractères par noeud est complètement # dynamique, on peut se lâcher, mettre 1024 caractère ou plus CHARS_PER_NODE = 1024 class tst_node(object): """ classe représentant un noeud du TST """ # __slots__ est une optimisation permettant de créer des objets Python # non dynamiques, ce qui utilise moins de mémoire __slots__ = ['chars','data','next','left','right'] instances = 0 def __init__(self): tst_node.instances += 1 self.chars = array('c') self.data = None self.next = None self.left = None self.right = None def __repr__(self): return "node(%s,data=%s,%i,%i,%i)"%( self.chars, self.data, self.left is not None and 1 or 0, self.next is not None and 1 or 0, self.right is not None and 1 or 0, ) class balance_info(object): __slots__ = ['did_balance','height','balance','right_balance','left_balance'] def __init__(self,did_balance=False,height=0,balance=0,right_balance=0,left_balance=0): self.did_balance = did_balance self.height = height self.balance = balance self.right_balance = right_balance self.left_balance = left_balance def __repr__(self): return "balance_info(d=%s,h=%s,b=%s,l=%s,r=%s)"%( self.did_balance, self.height, self.balance, self.left_balance, self.right_balance ) class compact_tst(object): """ Implémentation d'un TST compact """ def __init__(self): self.root = None def __getitem__(self,string): """ Lit dans l'arbre selon la syntaxe tst[string] """ # ATTENTION : ce code est intentionnellement dupliqué dans la méthode # visit(). Ne pas oublier de mettre celle-ci à jour lorsqu'il est modifié # ici. 
node = self.root index = 0 while node is not None: local_index = 0 # On avance tant qu'il y a égalité diff = 0 while local_index < len(node.chars) and index < len(string): diff = cmp(string[index],node.chars[local_index]) if diff == 0: local_index += 1 index += 1 else: break if diff != 0: # On a une différence de caractère if local_index < len(node.chars) - 1: # on s'est arrêté avant le dernier caractère du noeud, # il n'y a donc pas de match possible (sinon il y aurait eu # split à l'insertion) return None elif diff>0: node = node.left else: # diff<0 node = node.right elif local_index == len(node.chars): # On est au bout des caractères du noeud if index == len(string): # On est également au bout de la clé # C'est qu'on a de la chance ! return node.data else: # On avance d'un cran node = node.next else: # On n'est pas au bout des caractères, c'est qu'on est au # bout de la clé, et donc qu'il n'y a pas de match, sinon # il y aurait eu un split assert index == len(string) return None # node is None ==> pas de match return None def __setitem__(self,string,value): """ Ecrit dans l'arbre selon la syntaxe tst[string] = value """ self.root, discard = self._insert(string,value,0,self.root) assert self[string] == value def _insert(self,string,value,index,node): if node is None: return self._new_node(string,value,index) local_index = 0 # On avance tant qu'il y a égalité diff = 0 while local_index < len(node.chars) and index<len(string): diff = cmp(string[index],node.chars[local_index]) if diff == 0: local_index += 1 index += 1 else: break if diff!=0: assert local_index < len(node.chars) and index<len(string) # On a trouvé un point de divergence avant le dernier caractère du # noeud, et de la clé, il va donc falloir splitter if local_index < len(node.chars) - 1: node, balance = self._split_node(node,local_index) # On peut essayer de joindre le noeud suivant au noeud d'après # car le split peut permettre de compléter un noeud pas totalement # remplit node.next, discard = 
self._compact_node(node.next,None) # Maintenant que le split est fait, on peut continuer à positionner # la nouvelle clé balance = balance_info() if diff>0: node.left, left_balance = self._insert(string,value,index,node.left) balance.did_balance = left_balance.did_balance right_balance = self._compute_balance(node.right) else: node.right, right_balance = self._insert(string,value,index,node.right) balance.did_balance = right_balance.did_balance left_balance = self._compute_balance(node.left) # On calcule la nouvelle balance en tenant compte des modifs if len(node.chars)>1: balance.height = 1 else: balance.height = max(left_balance.height, right_balance.height) + 1 balance.balance = left_balance.height - right_balance.height balance.left_balance = left_balance.balance balance.right_balance = right_balance.balance if not balance.did_balance: # On effectue la balance si elle n'a pas déjà été effectuée node, balance = self._balance(node,balance) if len(node.chars)!=1: # Si à l'issue de la balance on se retrouve avec plusieurs # caractères, alors la hauteur du nouveau noeud est 1. balance.height = 1 balance.balance = 0 balance.left_balance = 0 balance.right_balance = 0 return node, balance elif local_index == len(node.chars): # On est arrivé au bout des caractères du noeud # sans différence if index == len(string): # On est également au bout de la clé # C'est qu'on a de la chance ! node.data = value else: # On n'est pas au bout de la clé node.next, next_balance = self._insert(string,value,index,node.next) # Suite à un split du noeud suivant, il est peut-être possible # de le recoller à ce noeud ? 
node, discard = self._compact_node(node, None) return node, self._compute_balance(node) else: # On est arrivé au bout de la clé, mais pas au bout des caractères # du noeud assert index == len(string) # On est au bout de la clé, mais avant la fin des caractères du # noeud ; il faut donc splitter, mais au local_index précédent car # on a bêtement avancé les deux à la fois aux lignes 105 - 106 node, balance = self._split_node(node,local_index-1) # On peut essayer de joindre le noeud suivant au noeud d'après node.next, discard = self._compact_node(node.next,None) # On stocke ensuite la clé et la valeur node.data = value return node, balance def __delitem__(self,string): self.root, discard = self._remove(string,0,self.root) def _remove(self,string,index,node): if node is None: return None local_index = 0 # On avance tant qu'il y a égalité diff = 0 while local_index < len(node.chars) and index<len(string): diff = cmp(string[index],node.chars[local_index]) if diff == 0: local_index += 1 index += 1 else: break if diff!=0: assert local_index < len(node.chars) and index<len(string) # On a trouvé un point de divergence avant le dernier caractère du # noeud, et de la clé, il va donc falloir splitter if local_index < len(node.chars) - 1: return node, self._compute_balance(node) # Maintenant que le split est fait, on peut continuer à positionner # la nouvelle clé balance = balance_info() if diff>0: node.left, left_balance = self._remove(string,index,node.left) balance.did_balance = left_balance.did_balance node, balance = self._compact_node(node,balance,True) right_balance = self._compute_balance(node.right) else: node.right, right_balance = self._remove(string,index,node.right) balance.did_balance = right_balance.did_balance node, balance = self._compact_node(node,balance,True) left_balance = self._compute_balance(node.left) # On calcule la nouvelle balance en tenant compte des modifs if len(node.chars)>1: balance.height = 1 else: balance.height = max(left_balance.height, 
right_balance.height) + 1 balance.balance = left_balance.height - right_balance.height balance.left_balance = left_balance.balance balance.right_balance = right_balance.balance if not balance.did_balance: # On effectue la balance si elle n'a pas déjà été effectuée node, balance = self._balance(node,balance) if len(node.chars)!=1: # Si à l'issue de la balance on se retrouve avec plusieurs # caractères, alors la hauteur du nouveau noeud est 1. balance.height = 1 balance.balance = 0 balance.left_balance = 0 balance.right_balance = 0 # return self._compact_node(node,balance) return node, balance elif local_index == len(node.chars): # On est arrivé au bout des caractères du noeud # sans différence if index == len(string): # On est également au bout de la clé # C'est qu'on a de la chance ! node.data = None else: # On n'est pas au bout de la clé node.next, next_balance = self._remove(string,index,node.next) # Suite à un split du noeud suivant, il est peut-être possible # de le recoller à ce noeud ? 
# node, discard = self._compact_node(node, None) return self._compact_node(node,self._compute_balance(node)) else: # On est arrivé au bout de la clé, mais pas au bout des caractères # du noeud assert index == len(string) # On est au bout de la clé, mais avant la fin des caractères du # noeud return node, balance def _compute_balance(self,node): if node is not None: if len(node.chars)>1: # La hauteur d'un noeud contenant plusieurs caractères # est forcément 1 return balance_info(height=1) else: balance = balance_info() # Récursion sur les noeuds fils left_balance = self._compute_balance(node.left) right_balance = self._compute_balance(node.right) balance.did_balance = False balance.height = max(left_balance.height, right_balance.height) + 1 balance.balance = left_balance.height - right_balance.height balance.left_balance = left_balance.balance balance.right_balance = right_balance.balance return balance else: # La hauteur d'un noeud non existant est 0, ce qui fait # que la hauteur d'une feuille est 1 return balance_info() def _balance(self,node,balance): assert balance.height == self._compute_balance(node).height, (node, balance, self._compute_balance(node)) assert len(node.chars)>1 or balance.balance == self._compute_balance(node).balance, "%s : %s != %s"%(node,balance,self._compute_balance(node)) assert balance.left_balance == self._compute_balance(node.left).balance assert balance.right_balance == self._compute_balance(node.right).balance assert -2 < balance.left_balance < 2 assert -2 < balance.right_balance < 2 assert -3 < balance.balance < 3 assert -2 < self._compute_balance(node.left).balance < 2 assert -2 < self._compute_balance(node.right).balance < 2 assert -3 < self._compute_balance(node).balance < 3 # Assure le critère AVL if balance.balance > 1: if balance.left_balance > 0: node, balance = self._ll(node,balance) else: node, balance = self._lr(node,balance) balance.did_balance = True elif balance.balance < -1: if balance.right_balance < 0: node, balance = 
self._rr(node,balance) else: node, balance = self._rl(node,balance) balance.did_balance = True assert -2 < self._compute_balance(node).balance < 2,(node,self._compute_balance(node)) assert -2 < balance.balance < 2 return node, balance def _ll(self,node,balance): # Un déséquilibre LL n'est possible qu'avec un noeud de gauche # n'ayant qu'un caractère assert len(node.left.chars) == 1 # On fait la rotation au niveau des liens left_node = node.left node.left = left_node.right left_node.right = node # Et maintenant on ramène tous les caractères du noeud d'origine sauf son # dernier, et on les insère au début du noeud de gauche. # En gros si dans le noeud d'origine on a abcdefG # et dans le noeud de gauche on a juste B # A la fin on a dans le noeud d'origine juste G # et dans le noeud de gauche abcdefB new_char = node.chars.pop() node.chars.append(left_node.chars.pop()) left_node.chars.append(new_char) node.chars, left_node.chars = left_node.chars, node.chars # Il est possible que le noeud d'origine soit concaténable avec la suite left_node.right, discard = self._compact_node(left_node.right,None) # On ajuste la balance en fonction de l'opération effectuée balance.height -= 1 balance.balance = 0 balance.left_balance = 0 # Le noeud de gauche prend la place du noeud d'origine dans l'arbre return left_node, balance def _rr(self,node,balance): assert len(node.right.chars) == 1 right_node = node.right node.right = right_node.left right_node.left = node new_char = node.chars.pop() node.chars.append(right_node.chars.pop()) right_node.chars.append(new_char) node.chars, right_node.chars = right_node.chars, node.chars right_node.left, discard = self._compact_node(right_node.left,None) balance.height -= 1 balance.balance = 0 balance.right_balance = 0 return right_node, balance def _lr(self,node,balance): if len(node.left.right.chars)>1: node.left.right, discard = self._split_node(node.left.right,0) node.left, discard = self._rr(node.left,balance_info()) node, balance = 
self._ll(node,balance) return node, balance def _rl(self,node,balance): if len(node.right.left.chars)>1: node.right.left, discard = self._split_node(node.right.left,0) node.right, discard = self._ll(node.right,balance_info()) node, balance = self._rr(node,balance) return node, balance def _split_node(self,node,local_index): """ Découpe un noeud à l'index donné """ assert local_index < len(node.chars) # On crée un nouveau noeud new_node = tst_node() # On prend tout le début du segment de clé du noeud y compris # le caractère qui diffère et on les met dans le nouveau noeud new_node.chars = node.chars[0:local_index + 1] # La suite de ce nouveau noeud est l'ancien noeud new_node.next = node # On adapte la chaîne dans l'ancien noeud, c'est le restant de # la chaîne après le split node.chars = node.chars[local_index + 1:] return new_node, balance_info(height=1) def _compact_node(self,node,balance,debug=False): """ Tente de ressouder un noeud à son noeud suivant si cela est possible """ if node is None: return None, balance_info() elif node.data is not None: return node, balance elif ( node.next is not None and node.left is None and node.right is None and len(node.chars)+len(node.next.chars)<CHARS_PER_NODE ): # Les quatre conditions ci dessus sont : # - on a un noeud suivant # - le noeud actuel n'est pas un pivot (dans ce cas la concaténation # serait impossible) # - le noeud actuel ne contient pas de données (idem) # - il y a de la place pour les caractères du noeud courant dans # le noeud suivant if debug: print "CAT", node, node.next, node.chars.extend(node.next.chars) node.next.chars = node.chars if debug: print '=>',node.next return node.next, self._compute_balance(node.next) elif ( node.next is None and (node.left is None or node.right is None) ): if node.left is None and node.right is None: if debug: print "CAT2", node, "=>", None return None, balance_info() else: # On prend le noeud restant new_node = node.left or node.right if len(node.chars)+len(new_node.chars) 
- 1 < CHARS_PER_NODE: if debug: print "CAT3", node, new_node, # On supprime le dernier caractère du noeud node.chars.pop() # On ajoute les caractères du nouveau noeud node.chars.extend(new_node.chars) # On met le résultat dans le nouveau noeud new_node.chars = node.chars if debug: print '=>',new_node return new_node, self._compute_balance(new_node) else: return node, balance else: return node, balance def _new_node(self,string,value,index): new_node = tst_node() # On remplit le segment du noeud avec un maximum de caractères de la clé # en partant de l'index donné length = min(len(string)-index,CHARS_PER_NODE) new_node.chars.extend(string[index:index+length]) if index+length<len(string): # s'il reste des caractères dans la clé après ce segment... new_node.next, discard = self._new_node(string,value,index+length) else: # sinon on met la clé et la donnée dans ce noeud new_node.data = value return new_node, balance_info(height=1) def stats(self,node,acc,depth=0): if node == None : return acc['nodes'] = acc.get('nodes',0) + 1 acc['total_chars'] = acc.get('total_chars',0) + len(node.chars) key = ('nbchars',len(node.chars)) acc[key] = acc.get(key,0) + 1 links = ((node.left is not None and 1) or 0) + ((node.next is not None and 1) or 0) + ((node.right is not None and 1) or 0) key = ('links',links) acc[key] = acc.get(key,0) + 1 key = ('depth',depth) acc[key] = acc.get(key,0) + 1 self.stats(node.left,acc,depth+1) self.stats(node.next,acc,depth+1) self.stats(node.right,acc,depth+1) return acc def visit(self,callback,string=None): if string is not None: # Ce code est copié / collé depuis _find_node(). # C'est fait exprès, car cela évite d'avoir instanciation d'un # tuple pour retour de valeur multiple à chaque appel de __getitem__. 
node = self.root index = 0 while node is not None: local_index = 0 # On avance tant qu'il y a égalité diff = 0 while local_index < len(node.chars) and index < len(string): diff = cmp(string[index],node.chars[local_index]) if diff == 0: local_index += 1 index += 1 else: break if diff != 0: # On a une différence de caractère if local_index < len(node.chars) - 1: # on s'est arrêté avant le dernier caractère du noeud, # il n'y a donc pas de match possible (sinon il y aurait eu # split à l'insertion) node = None break else: # différence au dernier caractère du noeud if diff>0: node = node.left elif diff<0: node = node.right elif local_index == len(node.chars): # On est au bout des caractères du noeud if index == len(string): # On est également au bout de la clé # C'est qu'on a de la chance ! break else: # On avance d'un cran node = node.next else: # On n'est pas au bout des caractères, c'est qu'on est au # bout de la clé, et donc qu'il n'y a pas de match, sinon # il y aurait eu un split assert index == len(string) # On retourne le noeud quand même car il va être utile pour # le visiteur, simplement il n'y a aucune donnée dedans break if node is None: return False else: key = string[:len(string)-local_index] return self._visit(node,array('c',key),callback,local_index<len(node.chars)) else: return self._visit(self.root,array('c'),callback,True) def _visit(self,node,string,callback,visit_left_right): if node is None: return False # D'abord à droite pour obtenir un ordre lexicographique if visit_left_right and self._visit(node.right,string+node.chars[:-1],callback,True): return True # Maintenant le noeud en cours if node.data is not None and callback(string+node.chars,node.data): return True # Puis le noeud suivant if self._visit(node.next,string+node.chars,callback,True): return True # Puis à gauche if visit_left_right and self._visit(node.left,string+node.chars[:-1],callback,True): return True return False def cat(self,node,debug=False): """ Méthode forçant la 
concaténation des noeuds, inutile sauf en cas de bug. """ if node == None : return node.left = self.cat(node.left,debug) node.next = self.cat(node.next,debug) node.right = self.cat(node.right,debug) node, discard = self._compact_node(node,None,debug) return node def debug(self,node,indentation=0): print node.chars,node.data if node.left is not None: print ' '*(indentation+1),'LEFT', self.debug(node.left,indentation+1) if node.next is not None: print ' '*(indentation+1),'NEXT', self.debug(node.next,indentation+1) if node.right is not None: print ' '*(indentation+1),'RIGHT', self.debug(node.right,indentation+1) if __name__ == '__main__': urls = compact_tst() def callback(key,value): assert urls[key] == value, "%s : %s != %s"%(key,urls[key],value) print key, value return False QUANTITY = 1000 - 1 n = 0 try: chars = 0 for n, l in enumerate(file('url_1000000.csv','rb')): if n == QUANTITY: break if n%1000==0 : print n key = l.rstrip() chars += len(key) urls[key] = 0 finally: stats = {} urls.stats(urls.root,stats) print n+1, "lignes", chars, "caracteres" for key in sorted(stats.keys()): print "%16s\t%6i\t%6.2f%%"%( key, stats[key], (key=='total_chars' and 100.0 * stats[key] / chars) or (100.0 * stats[key] / stats['nodes']) ) urls.root = urls.cat(urls.root,True) # On recalcule les stats après concaténation forcée des noeuds. # Si qqchose a changé c'est un bug ! 
stats = {} urls.stats(urls.root,stats) print n+1, "lignes", chars, "caracteres" for key in sorted(stats.keys()): print "%16s\t%6i\t%6.2f%%"%( key, stats[key], (key=='total_chars' and 100.0 * stats[key] / chars) or (100.0 * stats[key] / stats['nodes']) ) for n, l in enumerate(file('url_1000000.csv','rb')): if n == QUANTITY: break if n%1000==0 : print 'Delete ',n key = l.rstrip() if n%2 == 0 : del urls[key] urls.root = urls.cat(urls.root,True) for n, l in enumerate(file('url_1000000.csv','rb')): if n == QUANTITY: break if n%1000==0 : print 'Check ',n key = l.rstrip() if n%2==1: assert urls[key] == 0 else: assert urls[key] is None # On recalcule les stats après concaténation forcée des noeuds. # Si qqchose a changé c'est un bug ! stats = {} urls.stats(urls.root,stats) print n+1, "lignes", chars, "caracteres" for key in sorted(stats.keys()): print "%16s\t%6i\t%6.2f%%"%( key, stats[key], (key=='total_chars' and 100.0 * stats[key] / chars) or (100.0 * stats[key] / stats['nodes']) ) t = compact_tst() t['nicolas'] = 'nicolas' t['laurent'] = 'laurent' t['nicolas lehuen'] = 'nicolas lehuen' t['laurent querel'] = 'laurent querel' assert 'nicolas' == t['nicolas'] assert 'nicolas lehuen' == t['nicolas lehuen'] assert 'laurent' == t['laurent'] assert 'laurent querel' == t['laurent querel'] import random t = compact_tst() data = range(1000) seed = random.randint(-5000000,5000000) print 'Seed is ',seed seed = 654 random.Random(seed).shuffle(data) for i, d in enumerate(data): if i%100==0: print i t[str(d)] = d for i2, d2 in enumerate(data[:i]): assert t[str(d2)] == d2 for i, d in enumerate(data): if i%100==0: print i if i%3==0: del t[str(d)] t.root = t.cat(t.root,True) for i,d in enumerate(data): if i%3==0 : assert t[str(d)] == None, "%s => %s != %s"%(d,None,t[str(d)]) else: assert t[str(d)] == d, "%s => %s != %s"%(d,i,t[str(d)])
nlehuen/pytst
python/compacttst.py
Python
mit
28,991
[ "VisIt" ]
0c18f50308a7bfd04df8ef961feb022399e46511d4241b861ba765e5243cf42a
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os repository_name = 'bismark_0070' repository_description = "Galaxy's bismark wrapper" repository_long_description = "Long description of Galaxy's bismark wrapper" category_name = 'Test 0070 Invalid Tool Revisions' category_description = 'Tests for a repository with invalid tool revisions.' class TestBismarkRepository( ShedTwillTestCase ): '''Testing bismark with valid and invalid tool entries.''' def test_0000_create_or_login_admin_user( self ): """Create necessary user accounts and login as an admin user.""" self.logout() self.login( email=common.test_user_1_email, username=common.test_user_1_name ) test_user_1 = self.test_db_util.get_user( common.test_user_1_email ) assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % test_user_1_email test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 ) self.logout() self.login( email=common.admin_email, username=common.admin_username ) admin_user = self.test_db_util.get_user( common.admin_email ) assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email admin_user_private_role = self.test_db_util.get_private_role( admin_user ) def test_0005_create_category_and_repository( self ): """Create a category for this test suite, then create and populate a bismark repository. 
It should contain at least one each valid and invalid tool.""" category = self.create_category( name=category_name, description=category_description ) self.logout() self.login( email=common.test_user_1_email, username=common.test_user_1_name ) repository = self.get_or_create_repository( name=repository_name, description=repository_description, long_description=repository_long_description, owner=common.test_user_1_name, category_id=self.security.encode_id( category.id ), strings_displayed=[] ) self.upload_file( repository, filename='bismark/bismark.tar', filepath=None, valid_tools_only=False, uncompress_file=True, remove_repo_files_not_in_tar=False, commit_message='Uploaded bismark tarball.', strings_displayed=[], strings_not_displayed=[] ) self.display_manage_repository_page( repository, strings_displayed=[ 'Invalid tools' ] ) invalid_revision = self.get_repository_tip( repository ) self.upload_file( repository, filename='bismark/bismark_methylation_extractor.xml', filepath=None, valid_tools_only=False, uncompress_file=False, remove_repo_files_not_in_tar=False, commit_message='Uploaded an updated tool xml.', strings_displayed=[], strings_not_displayed=[] ) valid_revision = self.get_repository_tip( repository ) self.test_db_util.refresh( repository ) tool_guid = '%s/repos/user1/bismark_0070/bismark_methylation_extractor/0.7.7.3' % self.url.replace( 'http://', '' ).rstrip( '/' ) tool_metadata_strings_displayed = [ tool_guid, '0.7.7.3', # The tool version. 'bismark_methylation_extractor', # The tool ID. 'Bismark', # The tool name. 'methylation extractor' ] # The tool description. tool_page_strings_displayed = [ 'Bismark (version 0.7.7.3)' ] self.check_repository_tools_for_changeset_revision( repository, valid_revision, tool_metadata_strings_displayed=tool_metadata_strings_displayed, tool_page_strings_displayed=tool_page_strings_displayed ) self.check_repository_invalid_tools_for_changeset_revision( repository, invalid_revision )
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/test/tool_shed/functional/test_0070_invalid_tool.py
Python
gpl-3.0
4,650
[ "Galaxy" ]
02461af13a42013a6a3074da279a39f4cfe151d6cbee0c9e37383f7e55408b41
from __future__ import division, print_function, unicode_literals # This code is so you can run the samples without installing the package import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q" tags = "particles, Meteor" import pyglet import cocos from cocos.director import director from cocos.actions import * from cocos.layer import * from cocos.particle_systems import * class L(Layer): def __init__(self): super( L, self).__init__() # p = Fireworks() # p = Explosion() # p = Fire() # p = Flower() # p = Sun() # p = Spiral() p = Meteor() # p = Galaxy() p.position = (480,200) self.add( p ) def main(): director.init( resizable=True ) main_scene = cocos.scene.Scene() main_scene.add( L() ) director.run( main_scene ) if __name__ == '__main__': main()
google-code-export/los-cocos
test/test_particle_meteor.py
Python
bsd-3-clause
978
[ "Galaxy" ]
8ca50d106ca877ab67362bf0a5c5d03ca4ed76b64562e474a5a502fc431bd14f
# force floating point division. Can still use integer with // from __future__ import division # This file is used for importing the common utilities classes. import numpy as np import matplotlib.pyplot as plt import sys from GeneralUtil.python import PlotUtilities import IWT_Util from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util from FitUtil.EnergyLandscapes.InverseWeierstrass.Python.Code import \ InverseWeierstrass def TomPlot(LandscapeObj,OutBase,UnfoldObj,RefoldObj,idx,f_one_half_N=0e-12): # get a forward and reverse ToX = lambda x: x * 1e9 ToForceY = lambda y: y * 1e12 fig = PlotUtilities.figure(figsize=(8,4)) plt.subplot(1,2,1) SubplotArgs = dict(alpha=0.4,linewidth=0.5) FilterN = 500 Unfold = FEC_Util.GetFilteredForce(UnfoldObj[idx],FilterN) Refold = FEC_Util.GetFilteredForce(RefoldObj[idx],FilterN) UnfoldX = ToX(Unfold.Extension) UnfoldY = ToForceY(Unfold.Force) FoldX = ToX(Refold.Extension) FoldY = ToForceY(Refold.Force) plt.plot(UnfoldX,UnfoldY,color='r',label="Unfolding", **SubplotArgs) plt.plot(FoldX,FoldY,color='b',label="Refolding", **SubplotArgs) fontdict = dict(fontsize=13) x_text_dict = dict(x=60, y=22.5, s="2 nm", fontdict=fontdict, withdash=False, rotation="horizontal") y_text_dict = dict(x=59, y=27, s="5 pN", fontdict=fontdict, withdash=False, rotation="vertical") PlotUtilities.ScaleBar(x_kwargs=dict(x=[60,62],y=[24,24]), y_kwargs=dict(x=[60,60],y=[25,30]), text_x=x_text_dict,text_y=y_text_dict) PlotUtilities.legend(loc=[0.4,0.8],**fontdict) plt.subplot(1,2,2) Obj = IWT_Util.TiltedLandscape(LandscapeObj,f_one_half_N=f_one_half_N) plt.plot(Obj.landscape_ext_nm,Obj.OffsetTilted_kT) plt.xlim([56,69]) plt.ylim([-1,4]) yoffset = 1 x_text_dict = dict(x=58.5, y=yoffset+1.5, s="2 nm", fontdict=fontdict, withdash=False,rotation="horizontal") y_text_dict = dict(x=57, y=yoffset+2.5, s=r"1 k$_\mathrm{b}$T", fontdict=fontdict, withdash=False, rotation="vertical") PlotUtilities.ScaleBar(x_kwargs=dict(x=[58,60], 
y=[yoffset+1.75,yoffset+1.75]), y_kwargs=dict(x=[58,58],y=[yoffset+2,yoffset+3]), text_x=x_text_dict,text_y=y_text_dict, kill_axis=True) PlotUtilities.savefig(fig,OutBase + "TomMockup" + str(idx) + ".png", subplots_adjust=dict(bottom=-0.1)) # save out the data exactly as we want to plot it common = dict(delimiter=",") ext = str(idx) + ".txt" np.savetxt(X=np.c_[UnfoldX,UnfoldY],fname=OutBase+"Unfold" + ext,**common) np.savetxt(X=np.c_[FoldX,FoldY],fname=OutBase+"Fold"+ext,**common) np.savetxt(X=np.c_[Obj.landscape_ext_nm,Obj.OffsetTilted_kT], fname=OutBase+"Landscape"+ext,**common) def plot_tilted_landscape(LandscapeObj,min_landscape_kT=None, fmt_f_label="{:.0f}", max_landscape_kT=None,f_one_half_N=10e-12,**kwargs): Obj = IWT_Util.TiltedLandscape(LandscapeObj,f_one_half_N=f_one_half_N, **kwargs) Obj.OffsetTilted_kT -= min(Obj.OffsetTilted_kT) plt.plot(Obj.landscape_ext_nm,Obj.OffsetTilted_kT,color='b',alpha=0.7) if (max_landscape_kT is None): max_landscape_kT = max(Obj.OffsetTilted_kT)*1.5 if (min_landscape_kT is None): min_landscape_kT = np.percentile(Obj.OffsetTilted_kT,5)-2 plt.ylim( min_landscape_kT,max_landscape_kT) ylabel = ("Tilted (F=" + fmt_f_label + "pN) [kT]").format(f_one_half_N*1e12) PlotUtilities.lazyLabel("Extension [nm]",ylabel,"",frameon=True) return format_kcal_per_mol_second_axis_after_kT_axis() def get_limit_kcal_per_mol(ax_kT): """ Returns: kilocalorie per mol limits corresponding to given kT limits """ ylim_kT = np.array(ax_kT.get_ylim()) ylim_kcal_per_mol = IWT_Util.kT_to_kcal_per_mol() * ylim_kT return ylim_kcal_per_mol def _set_kcal_axis_based_on_kT(ax_kT,ax_kcal): """ sets the kilocalorie per mol axis based on the current limits of the kT axis Args: ax_<kT/kcal>: the axes to use Returns; nothing """ ylim_kcal_per_mol = get_limit_kcal_per_mol(ax_kT) ax_kcal.set_ylim(ylim_kcal_per_mol) def format_kcal_per_mol_second_axis_after_kT_axis(): """ formats a second, kcal/mol axis after plotting kT data """ ax_kT = plt.gca() ylim_kcal_per_mol = 
get_limit_kcal_per_mol(ax_kT) ax_kcal = PlotUtilities.secondAxis(ax=ax_kT,label="Energy (kcal/mol)", limits=ylim_kcal_per_mol,color='b', secondY=True) _set_kcal_axis_based_on_kT(ax_kT,ax_kcal) plt.sca(ax_kT) return ax_kcal def plot_free_landscape(LandscapeObj,**kwargs): """ plots a free landscape version extension Args: LandscapeObj: see plot_single_landscape kwargs: passed to TiltedLandscape Returns: tilted landscape """ Obj = IWT_Util.TiltedLandscape(LandscapeObj,**kwargs) plt.plot(Obj.landscape_ext_nm,Obj.Landscape_kT) range = max(Obj.Landscape_kT) - min(Obj.Landscape_kT) fudge = range/10 plt.ylim([-fudge,np.max(Obj.Landscape_kT)+fudge]) PlotUtilities.lazyLabel("","Landscape at F=0 [kT]","",frameon=True) format_kcal_per_mol_second_axis_after_kT_axis() return Obj def plot_single_landscape(LandscapeObj,**kwargs): """ Plots a detailed energy landscape, and saves Args: LandscapeObj: energy landscape object (untilted) **kwargs: passed to plot_tilted_landscape Returns: second, kcal/mol axis of tilted landscape """ plt.subplot(2,1,1) plot_free_landscape(LandscapeObj,**kwargs) plt.subplot(2,1,2) to_ret = plot_tilted_landscape(LandscapeObj,**kwargs) PlotUtilities.xlabel("Extension (nm)") return to_ret def InTheWeedsPlot(OutBase,UnfoldObj,RefoldObj=[],Example=None, Bins=[50,75,100,150,200,500,1000],**kwargs): """ Plots a detailed energy landscape, and saves Args: OutBase: where to start the save UnfoldObj: unfolding objects RefoldObj: refolding objects Bins: how many bins to use in the energy landscape plots <min/max>_landscape_kT: bounds on the landscape Returns: nothing """ # get the IWT kT = 4.1e-21 for b in Bins: LandscapeObj = InverseWeierstrass.\ FreeEnergyAtZeroForce(UnfoldObj,NumBins=b,RefoldingObjs=RefoldObj) # make a 2-D histogram of everything if (Example is not None): fig = PlotUtilities.figure(figsize=(8,8)) ext_nm = Example.Separation*1e9 IWT_Util.ForceExtensionHistograms(ext_nm, Example.Force*1e12, AddAverage=False, nBins=b) 
PlotUtilities.savefig(fig,OutBase + "0_{:d}hist.pdf".format(b)) # get the distance to the transition state etc print("DeltaG_Dagger is {:.1f}kT".format(Obj.DeltaGDagger)) fig = PlotUtilities.figure(figsize=(12,12)) plot_single_landscape(LandscapeObj,add_meta_half=True, add_meta_free=True,**kwargs) PlotUtilities.savefig(fig,OutBase + "1_{:d}IWT.pdf".format(b))
prheenan/Research
Perkins/AnalysisUtil/EnergyLandscapes/IWT_Plot.py
Python
gpl-3.0
7,873
[ "FoldX" ]
8b4c57c4e4aa0d847477f7284c7d4148ae63e05e490c3d4f718bf00fa53f10a6
""" Module containing the environment to run experiments. An :class:`~pypet.environment.Environment` provides an interface to run experiments based on parameter exploration. The environment contains and might even create a :class:`~pypet.trajectory.Trajectory` container which can be filled with parameters and results (see :mod:`pypet.parameter`). Instance of this trajectory are distributed to the user's job function to perform a single run of an experiment. An `Environment` is the handyman for scheduling, it can be used for multiprocessing and takes care of organizational issues like logging. """ __author__ = 'Robert Meyer' try: import __main__ as main except ImportError as exc: main = None # We can end up here in an interactive IPython console import os import sys import logging import shutil import multiprocessing as multip import traceback import hashlib import time import datetime import inspect try: from sumatra.projects import load_project from sumatra.programs import PythonExecutable except ImportError: load_project = None PythonExecutable = None try: import dill # If you do not set this log-level dill will flood any log file :-( logging.getLogger(dill.__name__).setLevel(logging.WARNING) except ImportError: dill = None try: import psutil except ImportError: psutil = None try: import scoop from scoop import futures, shared except ImportError: scoop = None try: import git except ImportError: git = None try: import zmq except ImportError: zmq = None import pypet.compat as compat import pypet.pypetconstants as pypetconstants from pypet.pypetlogging import LoggingManager, HasLogger, simple_logging_config from pypet.trajectory import Trajectory from pypet.storageservice import HDF5StorageService, LazyStorageService from pypet.utils.mpwrappers import QueueStorageServiceWriter, LockWrapper, \ PipeStorageServiceSender, PipeStorageServiceWriter, ReferenceWrapper, \ ReferenceStore, QueueStorageServiceSender, LockerServer, LockerClient, \ ForkAwareLockerClient, 
TimeOutLockerServer, QueuingClient, QueuingServer, \ ForkAwareQueuingClient from pypet.utils.siginthandling import sigint_handling from pypet.utils.gitintegration import make_git_commit from pypet._version import __version__ as VERSION from pypet.utils.decorators import deprecated, kwargs_api_change, prefix_naming from pypet.utils.helpful_functions import is_debug, result_sort, format_time, port_to_tcp, \ racedirs from pypet.utils.storagefactory import storage_factory from pypet.utils.configparsing import parse_config from pypet.parameter import Parameter def _pool_single_run(kwargs): """Starts a pool single run and passes the storage service""" wrap_mode = kwargs['wrap_mode'] traj = kwargs['traj'] traj.v_storage_service = _pool_single_run.storage_service if wrap_mode == pypetconstants.WRAP_MODE_LOCAL: # Free references from previous runs traj.v_storage_service.free_references() return _sigint_handling_single_run(kwargs) def _frozen_pool_single_run(kwargs): """Single run wrapper for the frozen pool, makes a single run and passes kwargs""" idx = kwargs.pop('idx') frozen_kwargs = _frozen_pool_single_run.kwargs frozen_kwargs.update(kwargs) # in case of `run_map` # we need to update job's args and kwargs traj = frozen_kwargs['traj'] traj.f_set_crun(idx) return _sigint_handling_single_run(frozen_kwargs) def _configure_pool(kwargs): """Configures the pool and keeps the storage service""" _pool_single_run.storage_service = kwargs['storage_service'] _configure_niceness(kwargs) _configure_logging(kwargs, extract=False) def _configure_frozen_pool(kwargs): """Configures the frozen pool and keeps all kwargs""" _frozen_pool_single_run.kwargs = kwargs _configure_niceness(kwargs) _configure_logging(kwargs, extract=False) # Reset full copy to it's old value traj = kwargs['traj'] traj.v_full_copy = kwargs['full_copy'] def _process_single_run(kwargs): """Wrapper function that first configures logging and starts a single run afterwards.""" _configure_niceness(kwargs) 
_configure_logging(kwargs) result_queue = kwargs['result_queue'] result = _sigint_handling_single_run(kwargs) result_queue.put(result) result_queue.close() def _configure_frozen_scoop(kwargs): """Wrapper function that configures a frozen SCOOP set up. Deletes of data if necessary. """ def _delete_old_scoop_rev_data(old_scoop_rev): if old_scoop_rev is not None: try: elements = shared.elements for key in elements: var_dict = elements[key] if old_scoop_rev in var_dict: del var_dict[old_scoop_rev] logging.getLogger('pypet.scoop').debug('Deleted old SCOOP data from ' 'revolution `%s`.' % old_scoop_rev) except AttributeError: logging.getLogger('pypet.scoop').error('Could not delete old SCOOP data from ' 'revolution `%s`.' % old_scoop_rev) scoop_rev = kwargs.pop('scoop_rev') # Check if we need to reconfigure SCOOP try: old_scoop_rev = _frozen_scoop_single_run.kwargs['scoop_rev'] configured = old_scoop_rev == scoop_rev except (AttributeError, KeyError): old_scoop_rev = None configured = False if not configured: _frozen_scoop_single_run.kwargs = shared.getConst(scoop_rev, timeout=424.2) frozen_kwargs = _frozen_scoop_single_run.kwargs frozen_kwargs['scoop_rev'] = scoop_rev frozen_kwargs['traj'].v_full_copy = frozen_kwargs['full_copy'] if not scoop.IS_ORIGIN: _configure_niceness(frozen_kwargs) _configure_logging(frozen_kwargs, extract=False) _delete_old_scoop_rev_data(old_scoop_rev) logging.getLogger('pypet.scoop').info('Configured Worker %s' % str(scoop.worker)) def _frozen_scoop_single_run(kwargs): try: _configure_frozen_scoop(kwargs) idx = kwargs.pop('idx') frozen_kwargs = _frozen_scoop_single_run.kwargs frozen_kwargs.update(kwargs) traj = frozen_kwargs['traj'] traj.f_set_crun(idx) return _single_run(frozen_kwargs) except Exception: scoop.logger.exception('ERROR occurred during a single run!') raise def _scoop_single_run(kwargs): """Wrapper function for scoop, that does not configure logging""" try: try: is_origin = scoop.IS_ORIGIN except AttributeError: # scoop is not 
properly started, i.e. with `python -m scoop...` # in this case scoop uses default `map` function, i.e. # the main process is_origin = True if not is_origin: # configure logging and niceness if not the main process: _configure_niceness(kwargs) _configure_logging(kwargs) return _single_run(kwargs) except Exception: scoop.logger.exception('ERROR occurred during a single run!') raise def _configure_logging(kwargs, extract=True): """Requests the logging manager to configure logging. :param extract: If naming data should be extracted from the trajectory """ try: logging_manager = kwargs['logging_manager'] if extract: logging_manager.extract_replacements(kwargs['traj']) logging_manager.make_logging_handlers_and_tools(multiproc=True) except Exception as exc: sys.stderr.write('Could not configure logging system because of: %s' % repr(exc)) traceback.print_exc() def _configure_niceness(kwargs): """Sets niceness of a process""" niceness = kwargs['niceness'] if niceness is not None: try: try: current = os.nice(0) if niceness - current > 0: # Under Linux you cannot decrement niceness if set elsewhere os.nice(niceness-current) except AttributeError: # Fall back on psutil under Windows psutil.Process().nice(niceness) except Exception as exc: sys.stderr.write('Could not configure niceness because of: %s' % repr(exc)) traceback.print_exc() def _sigint_handling_single_run(kwargs): """Wrapper that allow graceful exits of single runs""" try: graceful_exit = kwargs['graceful_exit'] if graceful_exit: sigint_handling.start() if sigint_handling.hit: result = (sigint_handling.SIGINT, None) else: result = _single_run(kwargs) if sigint_handling.hit: result = (sigint_handling.SIGINT, result) return result return _single_run(kwargs) except: # Log traceback of exception pypet_root_logger = logging.getLogger('pypet') pypet_root_logger.exception('ERROR occurred during a single run! ') raise def _single_run(kwargs): """ Performs a single run of the experiment. 
:param kwargs: Dict of arguments traj: The trajectory containing all parameters set to the corresponding run index. runfunc: The user's job function runargs: The arguments handed to the user's job function (as *args) runkwargs: The keyword arguments handed to the user's job function (as **kwargs) clean_up_after_run: Whether to clean up after the run automatic_storing: Whether or not the data should be automatically stored result_queue: A queue object to store results into in case a pool is used, otherwise None :return: Results computed by the user's job function which are not stored into the trajectory. Returns a nested tuple of run index and result and run information: ``((traj.v_idx, result), run_information_dict)`` """ pypet_root_logger = logging.getLogger('pypet') traj = kwargs['traj'] runfunc = kwargs['runfunc'] runargs = kwargs['runargs'] kwrunparams = kwargs['runkwargs'] clean_up_after_run = kwargs['clean_up_runs'] automatic_storing = kwargs['automatic_storing'] wrap_mode = kwargs['wrap_mode'] idx = traj.v_idx total_runs = len(traj) pypet_root_logger.info('\n=========================================\n ' 'Starting single run #%d of %d ' '\n=========================================\n' % (idx, total_runs)) # Measure start time traj.f_start_run(turn_into_run=True) # Run the job function of the user result = runfunc(traj, *runargs, **kwrunparams) # Store data if desired if automatic_storing: traj.f_store() # Add the index to the result and the run information if wrap_mode == pypetconstants.WRAP_MODE_LOCAL: result = ((traj.v_idx, result), traj.f_get_run_information(traj.v_idx, copy=False), traj.v_storage_service.references) traj.v_storage_service.free_references() else: result = ((traj.v_idx, result), traj.f_get_run_information(traj.v_idx, copy=False)) # Measure time of finishing traj.f_finalize_run(store_meta_data=False, clean_up=clean_up_after_run) pypet_root_logger.info('\n=========================================\n ' 'Finished single run #%d of %d ' 
'\n=========================================\n' % (idx, total_runs)) return result def _wrap_handling(kwargs): """ Starts running a queue handler and creates a log file for the queue.""" _configure_logging(kwargs, extract=False) # Main job, make the listener to the queue start receiving message for writing to disk. handler=kwargs['handler'] graceful_exit = kwargs['graceful_exit'] # import cProfile as profile # profiler = profile.Profile() # profiler.enable() if graceful_exit: sigint_handling.start() handler.run() # profiler.disable() # profiler.dump_stats('./queue.profile2') @prefix_naming class Environment(HasLogger): """ The environment to run a parameter exploration. The first thing you usually do is to create and environment object that takes care about the running of the experiment. You can provide the following arguments: :param trajectory: String or trajectory instance. If a string is supplied, a novel trajectory is created with that name. Note that the comment and the dynamically imported classes (see below) are only considered if a novel trajectory is created. If you supply a trajectory instance, these fields can be ignored. :param add_time: If True the current time is added to the trajectory name if created new. :param comment: Comment added to the trajectory if a novel trajectory is created. :param dynamic_imports: Only considered if a new trajectory is created. If you've written custom parameters or results that need to be loaded dynamically during runtime, the module containing the class needs to be specified here as a list of classes or strings naming classes and there module paths. For example: `dynamic_imports = ['pypet.parameter.PickleParameter', MyCustomParameter]` If you only have a single class to import, you do not need the list brackets: `dynamic_imports = 'pypet.parameter.PickleParameter'` :param wildcard_functions: Dictionary of wildcards like `$` and corresponding functions that are called upon finding such a wildcard. 
For example, to replace the `$` aka `crun` wildcard, you can pass the following: ``wildcard_functions = {('$', 'crun'): myfunc}``. Your wildcard function `myfunc` must return a unique run name as a function of a given integer run index. Moreover, your function must also return a unique *dummy* name for the run index being `-1`. Of course, you can define your own wildcards like `wildcard_functions = {('$mycard', 'mycard'): myfunc)}. These are not required to return a unique name for each run index, but can be used to group runs into buckets by returning the same name for several run indices. Yet, all wildcard functions need to return a dummy name for the index `-1`. :param automatic_storing: If `True` the trajectory will be stored at the end of the simulation and single runs will be stored after their completion. Be aware of data loss if you set this to `False` and not manually store everything. :param log_config: Can be path to a logging `.ini` file specifying the logging configuration. For an example of such a file see :ref:`more-on-logging`. Can also be a dictionary that is accepted by the built-in logging module. Set to `None` if you don't want *pypet* to configure logging. If not specified, the default settings are used. Moreover, you can manually tweak the default settings without creating a new `ini` file. Instead of the `log_config` parameter, pass a ``log_folder``, a list of `logger_names` and corresponding `log_levels` to fine grain the loggers to which the default settings apply. For example: ``log_folder='logs', logger_names='('pypet', 'MyCustomLogger'), log_levels=(logging.ERROR, logging.INFO)`` You can further disable multiprocess logging via setting ``log_multiproc=False``. :param log_stdout: Whether the output of ``stdout`` should be recorded into the log files. Disable if only logging statement should be recorded. 
Note if you work with an interactive console like *IPython*, it is a good idea to set ``log_stdout=False`` to avoid messing up the console output. Can also be a tuple: ('mylogger', 10), specifying a logger name as well as a log-level. The log-level defines with what level `stdout` is logged, it is *not* a filter. :param report_progress: If progress of runs and an estimate of the remaining time should be shown. Can be `True` or `False` or a triple ``(10, 'pypet', logging.Info)`` where the first number is the percentage and update step of the resulting progressbar and the second one is a corresponding logger name with which the progress should be logged. If you use `'print'`, the `print` statement is used instead. The third value specifies the logging level (level of logging statement *not* a filter) with which the progress should be logged. Note that the progress is based on finished runs. If you use the `QUEUE` wrapping in case of multiprocessing and if storing takes long, the estimate of the remaining time might not be very accurate. :param multiproc: Whether or not to use multiprocessing. Default is ``False``. Besides the wrap_mode (see below) that deals with how storage to disk is carried out in case of multiprocessing, there are two ways to do multiprocessing. By using a fixed pool of processes (choose `use_pool=True`, default option) or by spawning an individual process for every run and parameter combination (`use_pool=False`). The former will only spawn not more than *ncores* processes and all simulation runs are sent over to to the pool one after the other. This requires all your data to be pickled. If your data cannot be pickled (which could be the case for some BRIAN networks, for instance) choose `use_pool=False` (also make sure to set `continuable=False`). This will also spawn at most *ncores* processes at a time, but as soon as a process terminates a new one is spawned with the next parameter combination. 
Be aware that you will have as many logfiles in your logfolder as processes were spawned. If your simulation returns results besides storing results directly into the trajectory, these returned results still need to be pickled. :param ncores: If multiproc is ``True``, this specifies the number of processes that will be spawned to run your experiment. Note if you use QUEUE mode (see below) the queue process is not included in this number and will add another extra process for storing. If you have *psutil* installed, you can set `ncores=0` to let *psutil* determine the number of CPUs available. :param use_scoop: If python should be used in a SCOOP_ framework to distribute runs amond a cluster or multiple servers. If so you need to start your script via ``python -m scoop my_script.py``. Currently, SCOOP_ only works with ``'LOCAL'`` ``wrap_mode`` (see below). .. _SCOOP: http://scoop.readthedocs.org/ :param use_pool: Whether to use a fixed pool of processes or whether to spawn a new process for every run. Use the former if you perform many runs (50k and more) which are in terms of memory and runtime inexpensive. Be aware that everything you use must be picklable. Use the latter for fewer runs (50k and less) and which are longer lasting and more expensive runs (in terms of memory consumption). In case your operating system allows forking, your data does not need to be picklable. If you choose ``use_pool=False`` you can also make use of the `cap` values, see below. :param freeze_input: Can be set to ``True`` if the run function as well as all additional arguments are immutable. This will prevent the trajectory from getting pickled again and again. Thus, the run function, the trajectory, as well as all arguments are passed to the pool or SCOOP workers at initialisation. Works also under `run_map`. In this case the iterable arguments are, of course, not frozen but passed for every run. 
:param timeout: Timeout parameter in seconds passed on to SCOOP_ and ``'NETLOCK'`` wrapping. Leave `None` for no timeout. After `timeout` seconds SCOOP_ will assume that a single run failed and skip waiting for it. Moreover, if using ``'NETLOCK'`` wrapping, after `timeout` seconds a lock is automatically released and again available for other waiting processes. :param cpu_cap: If `multiproc=True` and `use_pool=False` you can specify a maximum cpu utilization between 0.0 (excluded) and 100.0 (included) as fraction of maximum capacity. If the current cpu usage is above the specified level (averaged across all cores), *pypet* will not spawn a new process and wait until activity falls below the threshold again. Note that in order to avoid dead-lock at least one process will always be running regardless of the current utilization. If the threshold is crossed a warning will be issued. The warning won't be repeated as long as the threshold remains crossed. For example `cpu_cap=70.0`, `ncores=3`, and currently on average 80 percent of your cpu are used. Moreover, let's assume that at the moment only 2 processes are computing single runs simultaneously. Due to the usage of 80 percent of your cpu, *pypet* will wait until cpu usage drops below (or equal to) 70 percent again until it starts a third process to carry out another single run. The parameters `memory_cap` and `swap_cap` are analogous. These three thresholds are combined to determine whether a new process can be spawned. Accordingly, if only one of these thresholds is crossed, no new processes will be spawned. To disable the cap limits simply set all three values to 100.0. You need the psutil_ package to use this cap feature. If not installed and you choose cap values different from 100.0 a ValueError is thrown. :param memory_cap: Cap value of RAM usage. If more RAM than the threshold is currently in use, no new processes are spawned. 
Can also be a tuple ``(limit, memory_per_process)``, first value is the cap value (between 0.0 and 100.0), second one is the estimated memory per process in mega bytes (MB). If an estimate is given a new process is not started if the threshold would be crossed including the estimate. :param swap_cap: Analogous to `cpu_cap` but the swap memory is considered. :param niceness: If you are running on a UNIX based system or you have psutil_ (under Windows) installed, you can choose a niceness value to prioritize the child processes executing the single runs in case you use multiprocessing. Under Linux these usually range from 0 (highest priority) to 19 (lowest priority). For Windows values check the psutil_ homepage. Leave ``None`` if you don't care about niceness. Under Linux the `niceness`` value is a minimum value, if the OS decides to nice your program (maybe you are running on a server) *pypet* does not try to decrease the `niceness` again. :param wrap_mode: If multiproc is ``True``, specifies how storage to disk is handled via the storage service. There are a few options: :const:`~pypet.pypetconstants.WRAP_MODE_QUEUE`: ('QUEUE') Another process for storing the trajectory is spawned. The sub processes running the individual single runs will add their results to a multiprocessing queue that is handled by an additional process. Note that this requires additional memory since the trajectory will be pickled and send over the queue for storage! :const:`~pypet.pypetconstants.WRAP_MODE_LOCK`: ('LOCK') Each individual process takes care about storage by itself. Before carrying out the storage, a lock is placed to prevent the other processes to store data. Accordingly, sometimes this leads to a lot of processes waiting until the lock is released. Allows loading of data during runs. :const:`~pypet.pypetconstants.WRAP_MODE_PIPE`: ('PIPE) Experimental mode based on a single pipe. 
Is faster than ``'QUEUE'`` wrapping but data corruption may occur, does not work under Windows (since it relies on forking). :const:`~pypet.pypetconstant.WRAP_MODE_LOCAL` ('LOCAL') Data is not stored during the single runs but after they completed. Storing is only performed in the main process. Note that removing data during a single run has no longer an effect on memory whatsoever, because there are references kept for all data that is supposed to be stored. :const:`~pypet.pypetconstant.WRAP_MODE_NETLOCK` ('NETLOCK') Similar to 'LOCK' but locks can be shared across a network. Sharing is established by running a lock server that distributes locks to the individual processes. Can be used with SCOOP_ if all hosts have access to a shared home directory. Allows loading of data during runs. :const:`~pypet.pypetconstant.WRAP_MODE_NETQUEUE` ('NETQUEUE') Similar to 'QUEUE' but data can be shared across a network. Sharing is established by running a queue server that distributes locks to the individual processes. If you don't want wrapping at all use :const:`~pypet.pypetconstants.WRAP_MODE_NONE` ('NONE') :param queue_maxsize: Maximum size of the Storage Queue, in case of ``'QUEUE'`` wrapping. ``0`` means infinite, ``-1`` (default) means the educated guess of ``2 * ncores``. :param port: Port to be used by lock server in case of ``'NETLOCK'`` wrapping. Can be a single integer as well as a tuple ``(7777, 9999)`` to specify a range of ports from which to pick a random one. Leave `None` for using pyzmq's default range. In case automatic determining of the host's IP address fails, you can also pass the full address (including the protocol and the port) of the host in the network like ``'tcp://127.0.0.1:7777'``. :param gc_interval: Interval (in runs or storage operations) with which ``gc.collect()`` should be called in case of the ``'LOCAL'``, ``'QUEUE'``, or ``'PIPE'`` wrapping. Leave ``None`` for never. 
In case of ``'LOCAL'`` wrapping ``1`` means after every run ``2`` after every second run, and so on. In case of ``'QUEUE'`` or ``'PIPE''`` wrapping ``1`` means after every store operation, ``2`` after every second store operation, and so on. Only calls ``gc.collect()`` in the main (if ``'LOCAL'`` wrapping) or the queue/pipe process. If you need to garbage collect data within your single runs, you need to manually call ``gc.collect()``. Usually, there is no need to set this parameter since the Python garbage collection works quite nicely and schedules collection automatically. :param clean_up_runs: In case of single core processing, whether all results under groups named `run_XXXXXXXX` should be removed after the completion of the run. Note in case of multiprocessing this happens anyway since the single run container will be destroyed after finishing of the process. Moreover, if set to ``True`` after post-processing it is checked if there is still data under `run_XXXXXXXX` and this data is removed if the trajectory is expanded. :param immediate_postproc: If you use post- and multiprocessing, you can immediately start analysing the data as soon as the trajectory runs out of tasks, i.e. is fully explored but the final runs are not completed. Thus, while executing the last batch of parameter space points, you can already analyse the finished runs. This is especially helpful if you perform some sort of adaptive search within the parameter space. The difference to normal post-processing is that you do not have to wait until all single runs are finished, but your analysis already starts while there are still runs being executed. This can be a huge time saver especially if your simulation time differs a lot between individual runs. Accordingly, you don't have to wait for a very long run to finish to start post-processing. 
In case you use immediate postprocessing, the storage service of your trajectory is still multiprocessing safe (except when using the wrap_mode ``'LOCAL'``). Accordingly, you could even use multiprocessing in your immediate post-processing phase if you dare, like use a multiprocessing pool_, for instance. Note that after the execution of the final run, your post-processing routine will be called again as usual. **IMPORTANT**: If you use immediate post-processing, the results that are passed to your post-processing function are not sorted by their run indices but by finishing time! .. _pool: https://docs.python.org/2/library/multiprocessing.html :param resumable: Whether the environment should take special care to allow to resume or continue crashed trajectories. Default is ``False``. You need to install dill_ to use this feature. *dill* will make snapshots of your simulation function as well as the passed arguments. BE AWARE that dill is still rather experimental! Assume you run experiments that take a lot of time. If during your experiments there is a power failure, you can resume your trajectory after the last single run that was still successfully stored via your storage service. The environment will create several `.ecnt` and `.rcnt` files in a folder that you specify (see below). Using this data you can resume crashed trajectories. In order to resume trajectories use :func:`~pypet.environment.Environment.resume`. Be aware that your individual single runs must be completely independent of one another to allow continuing to work. Thus, they should **NOT** be based on shared data that is manipulated during runtime (like a multiprocessing manager list) in the positional and keyword arguments passed to the run function. If you use post-processing, the expansion of trajectories and continuing of trajectories is NOT supported properly. There is no guarantee that both work together. .. 
_dill: https://pypi.python.org/pypi/dill :param resume_folder: The folder where the resume files will be placed. Note that *pypet* will create a sub-folder with the name of the environment. :param delete_resume: If true, *pypet* will delete the resume files after a successful simulation. :param storage_service: Pass a given storage service or a class constructor (default ``HDF5StorageService``) if you want the environment to create the service for you. The environment will pass the additional keyword arguments you pass directly to the constructor. If the trajectory already has a service attached, the one from the trajectory will be used. :param git_repository: If your code base is under git version control you can specify here the path (relative or absolute) to the folder containing the `.git` directory as a string. Note in order to use this tool you need GitPython_. If you set this path the environment will trigger a commit of your code base adding all files that are currently under version control. Similar to calling `git add -u` and `git commit -m 'My Message'` on the command line. The user can specify the commit message, see below. Note that the message will be augmented by the name and the comment of the trajectory. A commit will only be triggered if there are changes detected within your working copy. This will also add information about the revision to the trajectory, see below. .. _GitPython: http://pythonhosted.org/GitPython/0.3.1/index.html :param git_message: Message passed onto git command. Only relevant if a new commit is triggered. If no changes are detected, the information about the previous commit and the previous commit message are added to the trajectory and this user passed message is discarded. :param git_fail: If `True` the program fails instead of triggering a commit if there are not committed changes found in the code base. In such a case a `GitDiffError` is raised. 
:param sumatra_project: If your simulation is managed by sumatra_, you can specify here the path to the *sumatra* root folder. Note that you have to initialise the *sumatra* project at least once before via ``smt init MyFancyProjectName``. *pypet* will automatically ad ALL parameters to the *sumatra* record. If a parameter is explored, the WHOLE range is added instead of the default value. *pypet* will add the label and reason (only if provided, see below) to your trajectory as config parameters. .. _sumatra : http://neuralensemble.org/sumatra/ :param sumatra_reason: You can add an additional reason string that is added to the *sumatra* record. Regardless if `sumatra_reason` is empty, the name of the trajectory, the comment as well as a list of all explored parameters is added to the *sumatra* record. Note that the augmented label is not stored into the trajectory as config parameter, but the original one (without the name of the trajectory, the comment, and the list of explored parameters) in case it is not the empty string. :param sumatra_label: The label or name of your sumatra record. Set to `None` if you want sumatra to choose a label in form of a timestamp for you. :param do_single_runs: Whether you intend to actually to compute single runs with the trajectory. If you do not intend to do single runs, than set to ``False`` and the environment won't add config information like number of processors to the trajectory. :param graceful_exit: If ``True`` hitting CTRL+C (i.e.sending SIGINT) will not terminate the program immediately. Instead, active single runs will be finished and stored before shutdown. Hitting CTRL+C twice will raise a KeyboardInterrupt as usual. :param lazy_debug: If ``lazy_debug=True`` and in case you debug your code (aka you use pydevd and the expression ``'pydevd' in sys.modules`` is ``True``), the environment will use the :class:`~pypet.storageservice.LazyStorageService` instead of the HDF5 one. 
Accordingly, no files are created and your trajectory and results are not saved. This allows faster debugging and prevents *pypet* from blowing up your hard drive with trajectories that you probably not want to use anyway since you just debug your code. The Environment will automatically add some config settings to your trajectory. Thus, you can always look up how your trajectory was run. This encompasses most of the above named parameters as well as some information about the environment. This additional information includes a timestamp as well as a SHA-1 hash code that uniquely identifies your environment. If you use git integration, the SHA-1 hash code will be the one from your git commit. Otherwise the code will be calculated from the trajectory name, the current time, and your current *pypet* version. The environment will be named `environment_XXXXXXX_XXXX_XX_XX_XXhXXmXXs`. The first seven `X` are the first seven characters of the SHA-1 hash code followed by a human readable timestamp. All information about the environment can be found in your trajectory under `config.environment.environment_XXXXXXX_XXXX_XX_XX_XXhXXmXXs`. Your trajectory could potentially be run by several environments due to merging or extending an existing trajectory. Thus, you will be able to track how your trajectory was built over time. Git information is added to your trajectory as follows: * git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.hexsha The SHA-1 hash of the commit. `commit_XXXXXXX_XXXX_XX_XX_XXhXXmXXs` is mapped to the first seven items of the SHA-1 hash and the formatted data of the commit, e.g. `commit_7ef7hd4_2015_10_21_16h29m00s`. 
* git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.name_rev String describing the commits hexsha based on the closest reference * git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.committed_date Commit date as Unix Epoch data * git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.message The commit message Moreover, if you use the standard ``HDF5StorageService`` you can pass the following keyword arguments in ``**kwargs``: :param filename: The name of the hdf5 file. If none is specified the default `./hdf5/the_name_of_your_trajectory.hdf5` is chosen. If `filename` contains only a path like `filename='./myfolder/', it is changed to `filename='./myfolder/the_name_of_your_trajectory.hdf5'`. :param file_title: Title of the hdf5 file (only important if file is created new) :param overwrite_file: If the file already exists it will be overwritten. Otherwise, the trajectory will simply be added to the file and already existing trajectories are **not** deleted. :param encoding: Format to encode and decode unicode strings stored to disk. The default ``'utf8'`` is highly recommended. :param complevel: You can specify your compression level. 0 means no compression and 9 is the highest compression level. See `PyTables Compression`_ for a detailed description. .. _`PyTables Compression`: http://pytables.github.io/usersguide/optimization.html#compression-issues :param complib: The library used for compression. Choose between *zlib*, *blosc*, and *lzo*. Note that 'blosc' and 'lzo' are usually faster than 'zlib' but it may be the case that you can no longer open your hdf5 files with third-party applications that do not rely on PyTables. :param shuffle: Whether or not to use the shuffle filters in the HDF5 library. This normally improves the compression ratio. :param fletcher32: Whether or not to use the *Fletcher32* filter in the HDF5 library. This is used to add a checksum on hdf5 data. :param pandas_format: How to store pandas data frames. Either in 'fixed' ('f') or 'table' ('t') format. 
Fixed format allows fast reading and writing but disables querying the hdf5 data and appending to the store (with other 3rd party software other than *pypet*). :param purge_duplicate_comments: If you add a result via :func:`~pypet.naturalnaming.ResultGroup.f_add_result` or a derived parameter :func:`~pypet.naturalnaming.DerivedParameterGroup.f_add_derived_parameter` and you set a comment, normally that comment would be attached to each and every instance. This can produce a lot of unnecessary overhead if the comment is the same for every instance over all runs. If `purge_duplicate_comments=1` than only the comment of the first result or derived parameter instance created in a run is stored or comments that differ from this first comment. For instance, during a single run you call `traj.f_add_result('my_result`,42, comment='Mostly harmless!')` and the result will be renamed to `results.run_00000000.my_result`. After storage in the node associated with this result in your hdf5 file, you will find the comment `'Mostly harmless!'` there. If you call `traj.f_add_result('my_result',-43, comment='Mostly harmless!')` in another run again, let's say run 00000001, the name will be mapped to `results.run_00000001.my_result`. But this time the comment will not be saved to disk since `'Mostly harmless!'` is already part of the very first result with the name 'results.run_00000000.my_result'. Note that the comments will be compared and storage will only be discarded if the strings are exactly the same. If you use multiprocessing, the storage service will take care that the comment for the result or derived parameter with the lowest run index will be considered regardless of the order of the finishing of your runs. Note that this only works properly if all comments are the same. Otherwise the comment in the overview table might not be the one with the lowest run index. You need summary tables (see below) to be able to purge duplicate comments. 
This feature only works for comments in *leaf* nodes (aka Results and Parameters). So try to avoid to add comments in *group* nodes within single runs. :param summary_tables: Whether the summary tables should be created, i.e. the 'derived_parameters_runs_summary', and the `results_runs_summary`. The 'XXXXXX_summary' tables give a summary about all results or derived parameters. It is assumed that results and derived parameters with equal names in individual runs are similar and only the first result or derived parameter that was created is shown as an example. The summary table can be used in combination with `purge_duplicate_comments` to only store a single comment for every result with the same name in each run, see above. :param small_overview_tables: Whether the small overview tables should be created. Small tables are giving overview about 'config','parameters', 'derived_parameters_trajectory', , 'results_trajectory', 'results_runs_summary'. Note that these tables create some overhead. If you want very small hdf5 files set `small_overview_tables` to False. :param large_overview_tables: Whether to add large overview tables. This encompasses information about every derived parameter, result, and the explored parameter in every single run. If you want small hdf5 files set to ``False`` (default). :param results_per_run: Expected results you store per run. If you give a good/correct estimate storage to hdf5 file is much faster in case you store LARGE overview tables. Default is 0, i.e. the number of results is not estimated! :param derived_parameters_per_run: Analogous to the above. Finally, you can also pass properties of the trajectory, like ``v_with_links=True`` (you can leave the prefix ``v_``, i.e. ``with_links`` works, too). Thus, you can change the settings of the trajectory immediately. .. 
_psutil: http://psutil.readthedocs.org/ """ @parse_config @kwargs_api_change('delete_continue', 'delete_resume') @kwargs_api_change('continue_folder', 'resume_folder') @kwargs_api_change('continuable', 'resumable') @kwargs_api_change('freeze_pool_input', 'freeze_input') @kwargs_api_change('use_hdf5', 'storage_service') @kwargs_api_change('dynamically_imported_classes', 'dynamic_imports') @kwargs_api_change('pandas_append') @simple_logging_config def __init__(self, trajectory='trajectory', add_time=False, comment='', dynamic_imports=None, wildcard_functions=None, automatic_storing=True, log_config=pypetconstants.DEFAULT_LOGGING, log_stdout=False, report_progress = (5, 'pypet', logging.INFO), multiproc=False, ncores=1, use_scoop=False, use_pool=False, freeze_input=False, timeout=None, cpu_cap=100.0, memory_cap=100.0, swap_cap=100.0, niceness=None, wrap_mode=pypetconstants.WRAP_MODE_LOCK, queue_maxsize=-1, port=None, gc_interval=None, clean_up_runs=True, immediate_postproc=False, resumable=False, resume_folder=None, delete_resume=True, storage_service=HDF5StorageService, git_repository=None, git_message='', git_fail=False, sumatra_project=None, sumatra_reason='', sumatra_label=None, do_single_runs=True, graceful_exit=False, lazy_debug=False, **kwargs): if git_repository is not None and git is None: raise ValueError('You cannot specify a git repository without having ' 'GitPython. Please install the GitPython package to use ' 'pypet`s git integration.') if resumable and dill is None: raise ValueError('Please install `dill` if you want to use the feature to ' 'resume halted trajectories') if load_project is None and sumatra_project is not None: raise ValueError('`sumatra` package has not been found, either install ' '`sumatra` or set `sumatra_project=None`.') if sumatra_label is not None and '.' 
in sumatra_label: raise ValueError('Your sumatra label is not allowed to contain dots.') if wrap_mode == pypetconstants.WRAP_MODE_NETLOCK and zmq is None: raise ValueError('You need to install `zmq` for `NETLOCK` wrapping.') if (use_pool or use_scoop) and immediate_postproc: raise ValueError('You CANNOT perform immediate post-processing if you DO ' 'use a pool or scoop.') if use_pool and use_scoop: raise ValueError('You can either `use_pool` or `use_scoop` or none of both, ' 'but not both together') if use_scoop and scoop is None: raise ValueError('Cannot use `scoop` because it is not installed.') if (wrap_mode not in (pypetconstants.WRAP_MODE_NONE, pypetconstants.WRAP_MODE_LOCAL, pypetconstants.WRAP_MODE_LOCK, pypetconstants.WRAP_MODE_NETLOCK) and resumable): raise ValueError('Continuing trajectories does only work with ' '`LOCK`, `NETLOCK` or `LOCAL`wrap mode.') if resumable and not automatic_storing: raise ValueError('Continuing only works with `automatic_storing=True`') if use_scoop and wrap_mode not in (pypetconstants.WRAP_MODE_LOCAL, pypetconstants.WRAP_MODE_NONE, pypetconstants.WRAP_MODE_NETLOCK, pypetconstants.WRAP_MODE_NETQUEUE): raise ValueError('SCOOP mode only works with `LOCAL`, `NETLOCK` or ' '`NETQUEUE` wrap mode!') if niceness is not None and not hasattr(os, 'nice') and psutil is None: raise ValueError('You cannot set `niceness` if your operating system does not ' 'support the `nice` operation. 
Alternatively you can install ' '`psutil`.') if freeze_input and not use_pool and not use_scoop: raise ValueError('You can only use `freeze_input=True` if you either use ' 'a pool or SCOOP.') if not isinstance(memory_cap, tuple): memory_cap = (memory_cap, 0.0) if (cpu_cap <= 0.0 or cpu_cap > 100.0 or memory_cap[0] <= 0.0 or memory_cap[0] > 100.0 or swap_cap <= 0.0 or swap_cap > 100.0): raise ValueError('Please choose cap values larger than 0.0 ' 'and smaller or equal to 100.0.') check_usage = cpu_cap < 100.0 or memory_cap[0] < 100.0 or swap_cap < 100.0 if check_usage and psutil is None: raise ValueError('You cannot enable monitoring without having ' 'installed psutil. Please install psutil or set ' 'cpu_cap, memory_cap, and swap_cap to 100.0') if ncores == 0 and psutil is None: raise ValueError('You cannot set `ncores=0` for auto detection of CPUs if you did not ' 'installed psutil. Please install psutil or ' 'set `ncores` manually.') if port is not None and wrap_mode not in (pypetconstants.WRAP_MODE_NETLOCK, pypetconstants.WRAP_MODE_NETQUEUE): raise ValueError('You can only specify a port for the `NETLOCK` wrapping.') if use_scoop and graceful_exit: raise ValueError('You cannot exit gracefully using SCOOP.') unused_kwargs = set(kwargs.keys()) self._logging_manager = LoggingManager(log_config=log_config, log_stdout=log_stdout, report_progress=report_progress) self._logging_manager.check_log_config() self._logging_manager.add_null_handler() self._set_logger() self._map_arguments = False self._stop_iteration = False # Marker to cancel # iteration in case of Keyboard interrupt self._graceful_exit = graceful_exit # Helper attributes defined later on self._start_timestamp = None self._finish_timestamp = None self._runtime = None self._cpu_cap = cpu_cap self._memory_cap = memory_cap if psutil is not None: # Total memory in MB self._total_memory = psutil.virtual_memory().total / 1024.0 / 1024.0 # Estimated memory needed by each process as ratio self._est_per_process = 
self._memory_cap[1] / self._total_memory * 100.0 self._swap_cap = swap_cap self._check_usage = check_usage self._last_cpu_check = 0.0 self._last_cpu_usage = 0.0 if self._check_usage: # For initialisation self._estimate_cpu_utilization() self._niceness = niceness self._sumatra_project = sumatra_project self._sumatra_reason = sumatra_reason self._sumatra_label = sumatra_label self._loaded_sumatatra_project = None self._sumatra_record = None self._runfunc = None self._args = () self._kwargs = {} self._postproc = None self._postproc_args = () self._postproc_kwargs = {} self._immediate_postproc = immediate_postproc self._user_pipeline = False self._git_repository = git_repository self._git_message = git_message self._git_fail = git_fail # Check if a novel trajectory needs to be created. if isinstance(trajectory, compat.base_type): # Create a new trajectory self._traj = Trajectory(trajectory, add_time=add_time, dynamic_imports=dynamic_imports, wildcard_functions=wildcard_functions, comment=comment) self._timestamp = self.trajectory.v_timestamp # Timestamp of creation self._time = self.trajectory.v_time # Formatted timestamp else: self._traj = trajectory # If no new trajectory is created the time of the environment differs # from the trajectory and must be computed from the current time. 
init_time = time.time() formatted_time = format_time(init_time) self._timestamp = init_time self._time = formatted_time # In case the user provided a git repository path, a git commit is performed # and the environment's hexsha is taken from the commit if the commit was triggered by # this particular environment, otherwise a new one is generated if self._git_repository is not None: new_commit, self._hexsha = make_git_commit(self, self._git_repository, self._git_message, self._git_fail) # Identifier hexsha else: new_commit = False if not new_commit: # Otherwise we need to create a novel hexsha self._hexsha = hashlib.sha1(compat.tobytes(self.trajectory.v_name + str(self.trajectory.v_timestamp) + str(self.timestamp) + VERSION)).hexdigest() # Create the name of the environment short_hexsha = self._hexsha[0:7] name = 'environment' self._name = name + '_' + str(short_hexsha) + '_' + self._time # Name of environment # The trajectory should know the hexsha of the current environment. # Thus, for all runs, one can identify by which environment they were run. self._traj._environment_hexsha = self._hexsha self._traj._environment_name = self._name self._logging_manager.extract_replacements(self._traj) self._logging_manager.remove_null_handler() self._logging_manager.make_logging_handlers_and_tools() # Drop a message if we made a commit. We cannot drop the message directly after the # commit, because the logging files do not exist yet, # and we want this commit to be tracked if self._git_repository is not None: if new_commit: self._logger.info('Triggered NEW GIT commit `%s`.' % str(self._hexsha)) else: self._logger.info('No changes detected, added PREVIOUS GIT commit `%s`.' % str(self._hexsha)) # Create the storage service if storage_service is True: # to allow compatibility with older python versions, i.e. 
old # keyword use_hdf5 storage_service = HDF5StorageService if self._traj.v_storage_service is not None: # Use the service of the trajectory self._logger.info('Found storage service attached to Trajectory. Will use ' 'this storage service.') self._storage_service = self.trajectory.v_storage_service else: # Create a new service self._storage_service, unused_factory_kwargs = storage_factory(storage_service, self._traj, **kwargs) unused_kwargs = unused_kwargs - (set(kwargs.keys()) - unused_factory_kwargs) if lazy_debug and is_debug(): self._storage_service = LazyStorageService() self._traj.v_storage_service = self._storage_service # Create resume path if desired self._resumable = resumable if self._resumable: if resume_folder is None: resume_folder = os.path.join(os.getcwd(), 'resume') resume_path = os.path.join(resume_folder, self._traj.v_name) else: resume_path = None self._resume_folder = resume_folder self._resume_path = resume_path self._delete_resume = delete_resume # Check multiproc self._multiproc = multiproc if ncores == 0: # Let *pypet* detect CPU count via psutil ncores = psutil.cpu_count() self._logger.info('Determined CPUs automatically, found `%d` cores.' 
% ncores) self._ncores = ncores if queue_maxsize == -1: # Educated guess of queue size queue_maxsize = 2 * ncores self._queue_maxsize = queue_maxsize if wrap_mode is None: # None cannot be used in HDF5 files, accordingly we need a string representation wrap_mode = pypetconstants.WRAP_MODE_NONE self._wrap_mode = wrap_mode # Whether to use a pool of processes self._use_pool = use_pool self._use_scoop = use_scoop self._freeze_input = freeze_input self._gc_interval = gc_interval self._multiproc_wrapper = None # The wrapper Service self._do_single_runs = do_single_runs self._automatic_storing = automatic_storing self._clean_up_runs = clean_up_runs if (wrap_mode == pypetconstants.WRAP_MODE_NETLOCK and not isinstance(port, compat.base_type)): url = port_to_tcp(port) self._logger.info('Determined lock-server URL automatically, it is `%s`.' % url) else: url = port self._url = url self._timeout = timeout # self._deep_copy_data = False # deep_copy_data # For future reference deep_copy_arguments # Notify that in case of lazy debuggin we won't record anythin if lazy_debug and is_debug(): self._logger.warning('Using the LazyStorageService, nothing will be saved to disk.') # Current run index to avoid quadratic runtime complexity in case of re-running self._current_idx = 0 self._trajectory_name = self._traj.v_name for kwarg in list(unused_kwargs): try: val = kwargs[kwarg] self._traj.f_set_properties(**{kwarg: val}) self._logger.info('Set trajectory property `%s` to `%s`.' % (kwarg, str(val))) unused_kwargs.remove(kwarg) except AttributeError: pass if len(unused_kwargs) > 0: raise ValueError('You passed keyword arguments to the environment that you ' 'did not use. 
def _add_config(self):
    """Stores the environment's settings as locked config parameters of the trajectory.

    Every setting (multiprocessing flags, caps, wrap mode, timestamps,
    version info, ...) is added via ``f_add_config`` and immediately locked
    so single runs cannot mutate it.

    NOTE(review): this block was reconstructed from whitespace-mangled
    source; the exact nesting of the `if` branches should be confirmed
    against the original file.
    """
    # Add config data to the trajectory
    if self._do_single_runs:
        # Only add parameters if we actually want single runs to be performed
        config_name = 'environment.%s.multiproc' % self.name
        self._traj.f_add_config(Parameter, config_name, self._multiproc,
                                comment='Whether or not to use multiprocessing.').f_lock()
        if self._multiproc:
            config_name = 'environment.%s.use_pool' % self.name
            self._traj.f_add_config(Parameter, config_name, self._use_pool,
                                    comment='Whether to use a pool of processes or '
                                            'spawning individual processes for '
                                            'each run.').f_lock()
            config_name = 'environment.%s.use_scoop' % self.name
            self._traj.f_add_config(Parameter, config_name, self._use_scoop,
                                    comment='Whether to use scoop to launch single '
                                            'runs').f_lock()
            if self._niceness is not None:
                config_name = 'environment.%s.niceness' % self.name
                self._traj.f_add_config(Parameter, config_name, self._niceness,
                                        comment='Niceness value of child processes.').f_lock()
            if self._use_pool:
                config_name = 'environment.%s.freeze_input' % self.name
                self._traj.f_add_config(Parameter, config_name, self._freeze_input,
                                        comment='If inputs to each run are static and '
                                                'are not mutated during each run, '
                                                'can speed up pool running.').f_lock()
            elif self._use_scoop:
                pass
            else:
                # Caps only matter when individual processes are spawned manually
                config_name = 'environment.%s.cpu_cap' % self.name
                self._traj.f_add_config(Parameter, config_name, self._cpu_cap,
                                        comment='Maximum cpu usage beyond '
                                                'which no new processes '
                                                'are spawned.').f_lock()
                config_name = 'environment.%s.memory_cap' % self.name
                self._traj.f_add_config(Parameter, config_name, self._memory_cap,
                                        comment='Tuple, first entry: Maximum RAM usage beyond '
                                                'which no new processes are spawned; '
                                                'second entry: Estimated usage per '
                                                'process in MB. 0 if not estimated.').f_lock()
                config_name = 'environment.%s.swap_cap' % self.name
                self._traj.f_add_config(Parameter, config_name, self._swap_cap,
                                        comment='Maximum Swap memory usage beyond '
                                                'which no new '
                                                'processes are spawned').f_lock()
                config_name = 'environment.%s.immediate_postprocessing' % self.name
                self._traj.f_add_config(Parameter, config_name, self._immediate_postproc,
                                        comment='Whether to use immediate '
                                                'postprocessing.').f_lock()
            config_name = 'environment.%s.ncores' % self.name
            self._traj.f_add_config(Parameter, config_name, self._ncores,
                                    comment='Number of processors in case of '
                                            'multiprocessing').f_lock()
            config_name = 'environment.%s.wrap_mode' % self.name
            self._traj.f_add_config(Parameter, config_name, self._wrap_mode,
                                    comment='Multiprocessing mode (if multiproc),'
                                            ' i.e. whether to use QUEUE'
                                            ' or LOCK or NONE'
                                            ' for thread/process safe storing').f_lock()
            if (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
                    self._wrap_mode == pypetconstants.WRAP_MODE_PIPE):
                config_name = 'environment.%s.queue_maxsize' % self.name
                self._traj.f_add_config(Parameter, config_name, self._queue_maxsize,
                                        comment='Maximum size of Storage Queue/Pipe in case of '
                                                'multiprocessing and QUEUE/PIPE wrapping').f_lock()
            if self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK:
                config_name = 'environment.%s.url' % self.name
                self._traj.f_add_config(Parameter, config_name, self._url,
                                        comment='URL of lock distribution server, including '
                                                'protocol and port.').f_lock()
            if self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK or self._use_scoop:
                config_name = 'environment.%s.timeout' % self.name
                timeout = self._timeout
                if timeout is None:
                    # None cannot be stored in HDF5; -1.0 encodes "no timeout"
                    timeout = -1.0
                self._traj.f_add_config(Parameter, config_name, timeout,
                                        comment='Timout for scoop and NETLOCK, '
                                                '-1.0 means no timeout.').f_lock()
            if (self._gc_interval and
                    (self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL or
                     self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
                     self._wrap_mode == pypetconstants.WRAP_MODE_PIPE)):
                config_name = 'environment.%s.gc_interval' % self.name
                self._traj.f_add_config(Parameter, config_name, self._gc_interval,
                                        comment='Intervals with which ``gc.collect()`` '
                                                'is called.').f_lock()
        config_name = 'environment.%s.clean_up_runs' % self._name
        self._traj.f_add_config(Parameter, config_name, self._clean_up_runs,
                                comment='Whether or not results should be removed after the '
                                        'completion of a single run. '
                                        'You are not advised to set this '
                                        'to `False`. Only do it if you know what you are '
                                        'doing.').f_lock()
        config_name = 'environment.%s.resumable' % self._name
        self._traj.f_add_config(Parameter, config_name, self._resumable,
                                comment='Whether or not resume files should '
                                        'be created. If yes, everything is '
                                        'handled by `dill`.').f_lock()
        config_name = 'environment.%s.graceful_exit' % self._name
        self._traj.f_add_config(Parameter, config_name, self._graceful_exit,
                                comment='Whether or not to allow graceful handling '
                                        'of `SIGINT` (`CTRL+C`).').f_lock()
    # The following metadata is recorded regardless of single runs
    config_name = 'environment.%s.trajectory.name' % self.name
    self._traj.f_add_config(Parameter, config_name, self.trajectory.v_name,
                            comment='Name of trajectory').f_lock()
    config_name = 'environment.%s.trajectory.timestamp' % self.name
    self._traj.f_add_config(Parameter, config_name, self.trajectory.v_timestamp,
                            comment='Timestamp of trajectory').f_lock()
    config_name = 'environment.%s.timestamp' % self.name
    self._traj.f_add_config(Parameter, config_name, self.timestamp,
                            comment='Timestamp of environment creation').f_lock()
    config_name = 'environment.%s.hexsha' % self.name
    self._traj.f_add_config(Parameter, config_name, self.hexsha,
                            comment='SHA-1 identifier of the environment').f_lock()
    config_name = 'environment.%s.automatic_storing' % self.name
    if not self._traj.f_contains('config.' + config_name):
        self._traj.f_add_config(Parameter, config_name, self._automatic_storing,
                                comment='If trajectory should be stored automatically in the '
                                        'end.').f_lock()
    try:
        config_name = 'environment.%s.script' % self.name
        self._traj.f_add_config(Parameter, config_name, main.__file__,
                                comment='Name of the executed main script').f_lock()
    except AttributeError:
        pass  # We end up here if we use pypet within an ipython console
    for package_name, version in pypetconstants.VERSIONS_TO_STORE.items():
        config_name = 'environment.%s.versions.%s' % (self.name, package_name)
        self._traj.f_add_config(Parameter, config_name, version,
                                comment='Particular version of a package or distribution '
                                        'used during experiment. N/A if package could not '
                                        'be imported.').f_lock()
    self._traj.config.environment.v_comment = 'Settings for the different environments ' \
                                              'used to run the experiments'

def __repr__(self):
    """String representation of environment"""
    repr_string = '<%s %s for Trajectory %s>' % (self.__class__.__name__, self.name,
                                                 self.trajectory.v_name)
    return repr_string

def __enter__(self):
    # Context-manager entry: nothing to acquire, logging is already set up
    return self

def __exit__(self, exc_type, exc_val, exc_tb):
    # Context-manager exit: shut down all logging handlers
    self.disable_logging()

def disable_logging(self, remove_all_handlers=True):
    """Removes all logging handlers and stops logging to files and logging stdout.

    :param remove_all_handlers:
        If `True` all logging handlers are removed.
        If you want to keep the handlers set to `False`.

    """
    self._logging_manager.finalize(remove_all_handlers)
@kwargs_api_change('continue_folder', 'resume_folder')
def resume(self, trajectory_name=None, resume_folder=None):
    """Resumes crashed trajectories.

    :param trajectory_name:
        Name of the trajectory to resume; defaults to the name handed to the
        environment. Keep in mind that with `add_time=True` the environment
        appended the current date to that name.

    :param resume_folder:
        Folder containing the resume files. Pass the *parental* folder, not
        the sub-folder named after the trajectory. Defaults to the resume
        folder handed to the environment.

    :return:
        List of ``(run idx, result)`` tuples produced by your run function.
        Under multiprocessing these are ordered by finishing time, not by
        run index. Results stored inside the trajectory are NOT included;
        access those through the trajectory itself, e.g. after
        `~pypet.trajectory.Trajectory.f_update_skeleton` plus
        :func:`~pypet.trajectory.f_load` or :func:`~pypet.trajectory.f_load_items`.
        Even without a pool the results returned by `runfunc` must be
        picklable.

    """
    self._trajectory_name = (self.trajectory.v_name if trajectory_name is None
                             else trajectory_name)
    if resume_folder is not None:
        self._resume_folder = resume_folder
    return self._execute_runs(None)

@property
def trajectory(self):
    """The trajectory managed by this environment."""
    return self._traj

@property
def traj(self):
    """Shorthand alias for :attr:`trajectory`."""
    return self.trajectory

@property
def current_idx(self):
    """Index of the next run to be executed.

    May be set manually to make the environment reconsider old,
    non-completed runs.
    """
    return self._current_idx

@current_idx.setter
def current_idx(self, idx):
    self._current_idx = idx

@property
def hexsha(self):
    """The SHA1 identifier of the environment.

    Identical to the SHA1 of the git commit when version control is used;
    otherwise computed from the trajectory name, the current timestamp and
    the current *pypet* version.
    """
    return self._hexsha

@property
def time(self):
    """Creation time of the environment, human readable."""
    return self._time

@property
def timestamp(self):
    """Creation time as a float timestamp."""
    return self._timestamp

@property
def name(self):
    """Name of the Environment."""
    return self._name
def add_postprocessing(self, postproc, *args, **kwargs):
    """Registers a post-processing function.

    After completion of the single runs the environment invokes
    ``postproc(traj, result_list, *args, **kwargs)``. The function may load
    parts of the trajectory, add further results, or trigger an expansion of
    the trajectory — either by calling `f_expand` directly or by returning
    an expansion dictionary, in which case the environment calls `f_expand`.
    After every expansion the post-processing function is invoked again,
    which enables iterative parameter exploration (e.g. optimization).

    Note that once all runs have finished, the trajectory's storage service
    is no longer multiprocessing safe (you can wrap it manually with
    :class:`~pypet.environment.MultiprocessWrapper`). With **immediate**
    post-processing the storage service stays multiprocessing safe, except
    under the ``'LOCAL'`` wrap mode; check
    ``traj.v_storage_service.multiproc_safe`` if in doubt.

    :param postproc: The post processing function

    :param args: Additional arguments passed to the post-processing function

    :param kwargs: Additional keyword arguments passed to the postprocessing function

    :return:

    """
    self._postproc_args = args
    self._postproc_kwargs = kwargs
    self._postproc = postproc

def pipeline(self, pipeline):
    """Lets *pypet* supervise your whole experiment via a pipeline function.

    ``pipeline(traj)`` defines the entire experiment — from pre-processing
    and trajectory set-up over the simulation runs to post-processing — and
    returns up to TWO tuples with at most three entries each::

        return (runfunc, args, kwargs), (postproc, postproc_args, postproc_kwargs)

    ``runfunc`` is executed for every parameter combination, receiving the
    trajectory plus the optional `args` and `kwargs`. ``postproc`` receives
    the trajectory, the list of ``(run idx, result)`` tuples and the
    optional extra arguments, and may expand the trajectory just as with
    :func:`~pypet.environment.Environment.f_add_postproc`.

    Shorter forms are accepted as well::

        return runfunc
        return runfunc, args
        return runfunc, args, kwargs
        return (runfunc, args), (postproc,)

    ``return runfunc, kwargs`` does NOT work; use
    ``return runfunc, (), kwargs`` instead.

    :param pipeline:
        The pipeline function, taking only a single argument `traj` and
        returning all functions necessary for your experiment.

    :return:
        List of ``(run idx, result)`` tuples produced by `runfunc`. Under
        multiprocessing these are ordered by finishing time, not by run
        index. Results stored inside the trajectory are NOT included —
        interact with the trajectory instead, e.g. via
        :func:`~pypet.trajectory.Trajectory.f_update_skeleton` and
        :func:`~pypet.trajectory.f_load` / :func:`~pypet.trajectory.f_load_items`.
        Without a pool the results returned by `runfunc` still need to be
        picklable. Results of `postproc` are not returned; `postproc`
        should only return expansion dictionaries, if anything.

    """
    self._map_arguments = False
    self._user_pipeline = True
    return self._execute_runs(pipeline)

def pipeline_map(self, pipeline):
    """Creates a pipeline with iterable arguments"""
    self._map_arguments = True
    self._user_pipeline = True
    return self._execute_runs(pipeline)
def run(self, runfunc, *args, **kwargs):
    """Runs the experiments and explores the parameter space.

    :param runfunc: The task or job to do

    :param args: Additional arguments (not the ones in the trajectory) passed to `runfunc`

    :param kwargs:
        Additional keyword arguments (not the ones in the trajectory)
        passed to `runfunc`

    :return:
        List of ``(run idx, result)`` tuples produced by `runfunc`, always
        ordered by run index. Results stored inside the trajectory are NOT
        included — interact with the trajectory instead, e.g. via
        `~pypet.trajectory.Trajectory.f_update_skeleton` and
        :func:`~pypet.trajectory.f_load` / :func:`~pypet.trajectory.f_load_items`.
        Without a pool the results returned by `runfunc` still need to be
        picklable.

    """
    def pipeline(traj):
        # Wrap the user's function so it looks like a pipeline result
        return ((runfunc, args, kwargs),
                (self._postproc, self._postproc_args, self._postproc_kwargs))

    self._map_arguments = False
    self._user_pipeline = False
    return self._execute_runs(pipeline)

def run_map(self, runfunc, *iter_args, **iter_kwargs):
    """Calls runfunc with different args and kwargs each time.

    Similar to `:func:`~pypet.environment.Environment.run`, but every entry
    of ``iter_args`` and ``iter_kwargs`` must be an iterable, iterator, or
    generator yielding fresh arguments for each run.
    """
    if not iter_args and not iter_kwargs:
        raise ValueError('Use `run` if you don`t have any other arguments.')

    def pipeline(traj):
        # Iterables are unpacked per run later on by the iterator machinery
        return ((runfunc, iter_args, iter_kwargs),
                (self._postproc, self._postproc_args, self._postproc_kwargs))

    self._map_arguments = True
    self._user_pipeline = False
    return self._execute_runs(pipeline)

def _trigger_resume_snapshot(self):
    """Makes the trajectory continuable in case the user wants that"""
    snapshot_filename = os.path.join(self._resume_path, 'environment.ecnt')

    # Remember settings so they can be restored after dumping
    saved_full_copy = self._traj.v_full_copy
    saved_service = self._traj.v_storage_service
    self._traj.v_full_copy = True
    self._traj.v_storage_service = self._storage_service

    # Everything needed to reconstruct the experiment after a crash
    payload = {'full_copy': saved_full_copy,
               'trajectory': self._traj,
               'args': self._args,
               'kwargs': self._kwargs,
               'runfunc': self._runfunc,
               'postproc': self._postproc,
               'postproc_args': self._postproc_args,
               'postproc_kwargs': self._postproc_kwargs,
               'start_timestamp': self._start_timestamp}

    with open(snapshot_filename, 'wb') as snapshot_file:
        dill.dump(payload, snapshot_file, protocol=2)
        snapshot_file.flush()

    self._traj.v_full_copy = saved_full_copy
    self._traj.v_storage_service = saved_service
self._start_timestamp dump_file = open(dump_filename, 'wb') dill.dump(dump_dict, dump_file, protocol=2) dump_file.flush() dump_file.close() self._traj.v_full_copy = prev_full_copy self._traj.v_storage_service = prev_storage_service def _prepare_sumatra(self): """ Prepares a sumatra record """ reason = self._sumatra_reason if reason: reason += ' -- ' if self._traj.v_comment: commentstr = ' (`%s`)' % self._traj.v_comment else: commentstr = '' reason += 'Trajectory %s%s -- Explored Parameters: %s' % \ (self._traj.v_name, commentstr, str(compat.listkeys(self._traj._explored_parameters))) self._logger.info('Preparing sumatra record with reason: %s' % reason) self._sumatra_reason = reason self._loaded_sumatatra_project = load_project(self._sumatra_project) if self._traj.f_contains('parameters', shortcuts=False): param_dict = self._traj.parameters.f_to_dict(fast_access=False) for param_name in compat.listkeys(param_dict): param = param_dict[param_name] if param.f_has_range(): param_dict[param_name] = param.f_get_range() else: param_dict[param_name] = param.f_get() else: param_dict = {} relpath = os.path.relpath(sys.modules['__main__'].__file__, self._sumatra_project) executable = PythonExecutable(path=sys.executable) self._sumatra_record = self._loaded_sumatatra_project.new_record( parameters=param_dict, main_file=relpath, executable=executable, label=self._sumatra_label, reason=reason) def _finish_sumatra(self): """ Saves a sumatra record """ finish_time = self._start_timestamp - self._finish_timestamp self._sumatra_record.duration = finish_time self._sumatra_record.output_data = self._sumatra_record.datastore.find_new_data(self._sumatra_record.timestamp) self._loaded_sumatatra_project.add_record(self._sumatra_record) self._loaded_sumatatra_project.save() sumatra_label = self._sumatra_record.label config_name = 'sumatra.record_%s.label' % str(sumatra_label) conf_list = [] if not self._traj.f_contains('config.' 
def _prepare_resume(self):
    """ Prepares the continuation of a crashed trajectory.

    Loads the dill-pickled environment snapshot (``environment.ecnt``) from
    the resume folder, swaps in the stored trajectory, restores the user's
    run/postproc functions and arguments, and replays all per-run result
    snapshots (``*.rcnt``) so already-completed runs are not re-executed.

    :return: Sorted list of the previously computed single-run results.
    :raises RuntimeError: if the environment is not resumable or was created
        with ``do_single_runs=False``.
    """
    if not self._resumable:
        raise RuntimeError('If you create an environment to resume a run, you need to '
                           'set `continuable=True`.')
    if not self._do_single_runs:
        raise RuntimeError('You cannot resume a run if you did create an environment '
                           'with `do_single_runs=False`.')

    self._resume_path = os.path.join(self._resume_folder, self._trajectory_name)
    cnt_filename = os.path.join(self._resume_path, 'environment.ecnt')
    cnt_file = open(cnt_filename, 'rb')
    resume_dict = dill.load(cnt_file)
    cnt_file.close()
    traj = resume_dict['trajectory']

    # We need to update the information about the trajectory name
    config_name = 'config.environment.%s.trajectory.name' % self.name
    if self._traj.f_contains(config_name, shortcuts=False):
        param = self._traj.f_get(config_name, shortcuts=False)
        param.f_unlock()
        param.f_set(traj.v_name)
        param.f_lock()

    config_name = 'config.environment.%s.trajectory.timestamp' % self.name
    if self._traj.f_contains(config_name, shortcuts=False):
        param = self._traj.f_get(config_name, shortcuts=False)
        param.f_unlock()
        param.f_set(traj.v_timestamp)
        param.f_lock()

    # Merge the information so that we keep a record about the current environment
    if not traj.config.environment.f_contains(self.name, shortcuts=False):
        traj._merge_config(self._traj)
    self._traj = traj

    # User's job function
    self._runfunc = resume_dict['runfunc']
    # Arguments to the user's job function
    self._args = resume_dict['args']
    # Keyword arguments to the user's job function
    self._kwargs = resume_dict['kwargs']
    # Postproc function
    self._postproc = resume_dict['postproc']
    # Postproc args
    self._postproc_args = resume_dict['postproc_args']
    # Postproc kwargs
    self._postproc_kwargs = resume_dict['postproc_kwargs']

    # Unpack the trajectory
    self._traj.v_full_copy = resume_dict['full_copy']
    # Load meta data
    self._traj.f_load(load_parameters=pypetconstants.LOAD_NOTHING,
                      load_derived_parameters=pypetconstants.LOAD_NOTHING,
                      load_results=pypetconstants.LOAD_NOTHING,
                      load_other_data=pypetconstants.LOAD_NOTHING)

    # Now we have to reconstruct previous results from the per-run snapshots
    result_list = []
    full_filename_list = []
    for filename in os.listdir(self._resume_path):
        _, ext = os.path.splitext(filename)
        if ext != '.rcnt':
            continue
        full_filename = os.path.join(self._resume_path, filename)
        cnt_file = open(full_filename, 'rb')
        result_list.append(dill.load(cnt_file))
        cnt_file.close()
        full_filename_list.append(full_filename)

    new_result_list = []
    for result_tuple in result_list:
        # Each snapshot carries (result, run_information)
        run_information = result_tuple[1]
        self._traj._update_run_information(run_information)
        new_result_list.append(result_tuple[0])
    result_sort(new_result_list)

    # Add a config parameter signalling that an experiment was resumed, and how many of them
    config_name = 'environment.%s.resumed' % self.name
    if not config_name in self._traj:
        self._traj.f_add_config(Parameter, config_name, True,
                                comment='Added if a crashed trajectory was continued.')

    self._logger.info('I will resume trajectory `%s`.' % self._traj.v_name)

    return new_result_list
% self._traj.v_name) return new_result_list def _prepare_runs(self, pipeline): """Prepares the running of an experiment :param pipeline: A pipeline function that defines the task """ pip_result = pipeline(self._traj) # Call the pipeline function # Extract the task to do from the pipeline result raise_error = False if pip_result is None: if self._do_single_runs: raise RuntimeError('Your pipeline function did return `None`.' 'Accordingly, I assume you just do data analysis. ' 'Please create and environment with `do_single_runs=False`.') self._logger.info('Your pipeline returned no runfunction, I assume you do some ' 'sort of data analysis and will skip any single run execution.') self._runfunc = None return elif (len(pip_result) == 2 and isinstance(pip_result[0], tuple) and isinstance(pip_result[1], tuple)): # Extract the run and post-processing functions and arguments run_tuple = pip_result[0] self._runfunc = run_tuple[0] if len(run_tuple) > 1: self._args = run_tuple[1] if len(run_tuple) > 2: self._kwargs = run_tuple[2] if len(run_tuple) > 3: raise_error = True postproc_tuple = pip_result[1] if len(postproc_tuple) > 0: self._postproc = postproc_tuple[0] if len(postproc_tuple) > 1: self._postproc_args = postproc_tuple[1] if len(postproc_tuple) > 2: self._postproc_kwargs = postproc_tuple[2] if len(run_tuple) > 3: raise_error = True elif len(pip_result) <= 3: self._runfunc = pip_result[0] if len(pip_result) > 1: self._args = pip_result[1] if len(pip_result) > 2: self._kwargs = pip_result[2] else: raise_error = True if raise_error: raise RuntimeError('Your pipeline result is not understood please return' 'a tuple of maximum length 3: ``(runfunc, args, kwargs)`` ' 'Or return two tuple of maximum length 3: ' '``(runfunc, args, kwargs), ' '(postproc, postproc_args, postproc_kwargs)') if self._runfunc is not None and not self._do_single_runs: raise RuntimeError('You cannot make a run if you did create an environment ' 'with `do_single_runs=False`.') if self._resumable: 
racedirs(self._resume_path) if os.listdir(self._resume_path): raise RuntimeError('Your resume folder `%s` needs ' 'to be empty to allow continuing!' % self._resume_path) if self._user_pipeline: self._logger.info('\n************************************************************\n' 'STARTING PPREPROCESSING for trajectory\n`%s`' '\n************************************************************\n' % self._traj.v_name) # Make some preparations (locking of parameters etc) and store the trajectory self._logger.info('I am preparing the Trajectory for the experiment and ' 'initialise the store.') self._traj._prepare_experiment() self._logger.info('Initialising the storage for the trajectory.') self._traj.f_store(only_init=True) def _show_progress(self, n, total_runs): """Displays a progressbar""" self._logging_manager.show_progress(n, total_runs) def _make_kwargs(self, **kwargs): """Creates the keyword arguments for the single run handling""" result_dict = {'traj': self._traj, 'logging_manager': self._logging_manager, 'runfunc': self._runfunc, 'runargs': self._args, 'runkwargs': self._kwargs, 'clean_up_runs': self._clean_up_runs, 'automatic_storing': self._automatic_storing, 'wrap_mode': self._wrap_mode, 'niceness': self._niceness, 'graceful_exit': self._graceful_exit} result_dict.update(kwargs) if self._multiproc: if self._use_pool or self._use_scoop: if self._use_scoop: del result_dict['graceful_exit'] if self._freeze_input: # Remember the full copy setting for the frozen input to # change this back once the trajectory is received by # each process result_dict['full_copy'] = self.traj.v_full_copy if self._map_arguments: del result_dict['runargs'] del result_dict['runkwargs'] else: result_dict['clean_up_runs'] = False if self._use_pool: # Needs only be deleted in case of using a pool but necessary for scoop del result_dict['logging_manager'] del result_dict['niceness'] else: result_dict['clean_up_runs'] = False return result_dict def _make_index_iterator(self, start_run_idx): 
def _make_index_iterator(self, start_run_idx):
    """Returns an iterator over the run indices that are not completed.

    Skips runs already marked completed, sets the trajectory's current run
    before yielding, and stops early if ``self._stop_iteration`` is set
    (e.g. by a keyboard interrupt).
    """
    total_runs = len(self._traj)
    for n in compat.xrange(start_run_idx, total_runs):
        # Remember the next index so a restart can pick up from here
        self._current_idx = n + 1
        if self._stop_iteration:
            self._logger.debug('I am stopping new run iterations now!')
            break
        if not self._traj._is_completed(n):
            self._traj.f_set_crun(n)
            yield n
        else:
            self._logger.debug('Run `%d` has already been completed, I am skipping it.' % n)

def _make_iterator(self, start_run_idx, copy_data=False, **kwargs):
    """ Returns an iterator over all runs and yields the keyword arguments.

    :param start_run_idx: First run index to consider.
    :param copy_data:
        If `True`, each yielded kwargs dict is a shallow copy and (unless
        input is frozen) carries its own copy of the trajectory.
    """
    if (not self._freeze_input) or (not self._multiproc):
        # With frozen multiprocessing input the kwargs were built elsewhere
        kwargs = self._make_kwargs(**kwargs)

    def _do_iter():
        if self._map_arguments:
            # Turn all user-supplied iterables into iterators so each run
            # consumes the next element
            self._args = tuple(iter(arg) for arg in self._args)
            for key in compat.listkeys(self._kwargs):
                self._kwargs[key] = iter(self._kwargs[key])

            for idx in self._make_index_iterator(start_run_idx):
                iter_args = tuple(next(x) for x in self._args)
                iter_kwargs = {}
                for key in self._kwargs:
                    iter_kwargs[key] = next(self._kwargs[key])
                kwargs['runargs'] = iter_args
                kwargs['runkwargs'] = iter_kwargs
                if self._freeze_input:
                    # Frozen pool needs current run index
                    kwargs['idx'] = idx
                if copy_data:
                    copied_kwargs = kwargs.copy()
                    if not self._freeze_input:
                        copied_kwargs['traj'] = self._traj.f_copy(copy_leaves='explored',
                                                                  with_links=True)
                    yield copied_kwargs
                else:
                    yield kwargs
        else:
            for idx in self._make_index_iterator(start_run_idx):
                if self._freeze_input:
                    # Frozen pool needs current run index
                    kwargs['idx'] = idx
                if copy_data:
                    copied_kwargs = kwargs.copy()
                    if not self._freeze_input:
                        copied_kwargs['traj'] = self._traj.f_copy(copy_leaves='explored',
                                                                  with_links=True)
                    yield copied_kwargs
                else:
                    yield kwargs

    return _do_iter()
Number of new runs """ repeat = False start_run_idx = 0 new_runs = 0 # Do some finalization self._traj._finalize(store_meta_data=True) old_traj_length = len(self._traj) postproc_res = self._postproc(self._traj, results, *self._postproc_args, **self._postproc_kwargs) if postproc_res is None: pass elif isinstance(postproc_res, dict): if postproc_res: self._traj.f_expand(postproc_res) elif isinstance(postproc_res, tuple): expand_dict = postproc_res[0] if len(postproc_res) > 1: self._args = postproc_res[1] if len(postproc_res) > 2: self._kwargs = postproc_res[2] if len(postproc_res) > 3: self._postproc_args = postproc_res[3] if len(postproc_res) > 4: self._postproc_kwargs = postproc_res[4] if expand_dict: self._traj.f_expand(expand_dict) else: self._logger.error('Your postproc result `%s` was not understood.' % str(postproc_res)) new_traj_length = len(self._traj) if new_traj_length != old_traj_length: start_run_idx = old_traj_length repeat = True if self._resumable: self._logger.warning('Continuing a trajectory AND expanding it during runtime is ' 'NOT supported properly, there is no guarantee that this ' 'works!') self._traj.f_store(only_init=True) new_traj_length = len(self._traj) new_runs = new_traj_length - old_traj_length return repeat, start_run_idx, new_runs def _estimate_cpu_utilization(self): """Estimates the cpu utilization within the last 500ms""" now = time.time() if now - self._last_cpu_check >= 0.5: try: self._last_cpu_usage = psutil.cpu_percent() self._last_cpu_check = now except (psutil.NoSuchProcess, ZeroDivisionError): pass # psutil sometimes produces ZeroDivisionErrors, has been fixed in newer # Versions but we want to support older as well return self._last_cpu_usage def _estimate_memory_utilization(self, process_dict): """Estimates memory utilization to come if process was started""" n_processes = len(process_dict) total_utilization = psutil.virtual_memory().percent sum = 0.0 for proc in compat.itervalues(process_dict): try: sum += 
psutil.Process(proc.pid).memory_percent() except (psutil.NoSuchProcess, ZeroDivisionError): pass curr_all_processes = sum missing_utilization = max(0.0, n_processes * self._est_per_process - curr_all_processes) estimated_utilization = total_utilization estimated_utilization += missing_utilization estimated_utilization += self._est_per_process return estimated_utilization def _execute_runs(self, pipeline): """ Starts the individual single runs. Starts runs sequentially or initiates multiprocessing. :param pipeline: A pipeline function producing the run function the corresponding arguments and postprocessing function and arguments :return: List of tuples, where each tuple contains the run idx and the result. """ if self._start_timestamp is None: self._start_timestamp = time.time() if self._map_arguments and self._resumable: raise ValueError('You cannot use `run_map` or `pipeline_map` in combination ' 'with continuing option.') if self._sumatra_project is not None: self._prepare_sumatra() if pipeline is not None: results = [] self._prepare_runs(pipeline) else: results = self._prepare_resume() if self._runfunc is not None: self._traj._run_by_environment = True if self._graceful_exit: sigint_handling.start() try: self._inner_run_loop(results) finally: self._traj._run_by_environment = False self._stop_iteration = False if self._graceful_exit: sigint_handling.finalize() self._add_wildcard_config() if self._automatic_storing: self._logger.info('\n************************************************************\n' 'STARTING FINAL STORING of trajectory\n`%s`' '\n************************************************************\n' % self._traj.v_name) self._traj.f_store() self._logger.info('\n************************************************************\n' 'FINISHED FINAL STORING of trajectory\n`%s`.' 
'\n************************************************************\n' % self._traj.v_name) self._finish_timestamp = time.time() findatetime = datetime.datetime.fromtimestamp(self._finish_timestamp) startdatetime = datetime.datetime.fromtimestamp(self._start_timestamp) self._runtime = str(findatetime - startdatetime) conf_list = [] config_name = 'environment.%s.start_timestamp' % self.name if not self._traj.f_contains('config.' + config_name): conf1 = self._traj.f_add_config(Parameter, config_name, self._start_timestamp, comment='Timestamp of starting of experiment ' '(when the actual simulation was ' 'started (either by calling `run`, ' '`resume`, or `pipeline`).') conf_list.append(conf1) config_name = 'environment.%s.finish_timestamp' % self.name if not self._traj.f_contains('config.' + config_name): conf2 = self._traj.f_add_config(Parameter, config_name, self._finish_timestamp, comment='Timestamp of finishing of an experiment.') else: conf2 = self._traj.f_get('config.' + config_name) conf2.f_unlock() conf2.f_set(self._finish_timestamp) conf_list.append(conf2) config_name = 'environment.%s.runtime' % self.name if not self._traj.f_contains('config.' + config_name): conf3 = self._traj.f_add_config(Parameter, config_name, self._runtime, comment='Runtime of whole experiment.') else: conf3 = self._traj.f_get('config.' 
+ config_name) conf3.f_unlock() conf3.f_set(self._runtime) conf_list.append(conf3) if self._automatic_storing: self._traj.f_store_items(conf_list, store_data=pypetconstants.OVERWRITE_DATA) if hasattr(self._traj.v_storage_service, 'finalize'): # Finalize the storage service if this is supported self._traj.v_storage_service.finalize() incomplete = [] for run_name in self._traj.f_get_run_names(): if not self._traj._is_completed(run_name): incomplete.append(run_name) if len(incomplete) > 0: self._logger.error('Following runs of trajectory `%s` ' 'did NOT complete: `%s`' % (self._traj.v_name, ', '.join(incomplete))) else: self._logger.info('All runs of trajectory `%s` were completed successfully.' % self._traj.v_name) if self._sumatra_project is not None: self._finish_sumatra() return results def _add_wildcard_config(self): """Adds config data about the wildcard functions""" for idx, pair in enumerate(self._traj._wildcard_functions.items()): wildcards, wc_function = pair for jdx, wildcard in enumerate(wildcards): config_name = ('environment.%s.wildcards.function_%d.wildcard_%d' % (self.name, idx, jdx)) if not self._traj.f_contains('config.' + config_name): self._traj.f_add_config(Parameter, config_name, wildcard, comment='Wildcard symbol for the wildcard function').f_lock() if hasattr(wc_function, '__name__'): config_name = ('environment.%s.wildcards.function_%d.name' % (self.name, idx)) if not self._traj.f_contains('config.' + config_name): self._traj.f_add_config(Parameter, config_name, wc_function.__name__, comment='Nme of wildcard function').f_lock() if wc_function.__doc__: config_name = ('environment.%s.wildcards.function_%d.doc' % (self.name, idx)) if not self._traj.f_contains('config.' 
                                             + config_name):
                    self._traj.f_add_config(Parameter, config_name, wc_function.__doc__,
                                            comment='Docstring of wildcard function').f_lock()
            try:
                source = inspect.getsource(wc_function)
                config_name = ('environment.%s.wildcards.function_%d.source' % (self.name, idx))
                if not self._traj.f_contains('config.' + config_name):
                    self._traj.f_add_config(Parameter, config_name, source,
                                            comment='Source code of wildcard function').f_lock()
            except Exception:
                pass  # We cannot find the source, just leave it

    def _inner_run_loop(self, results):
        """Performs the inner loop of the run execution"""
        start_run_idx = self._current_idx
        expanded_by_postproc = False

        self._storage_service = self._traj.v_storage_service
        self._multiproc_wrapper = None

        if self._resumable:
            self._trigger_resume_snapshot()

        self._logger.info(
            '\n************************************************************\n'
            'STARTING runs of trajectory\n`%s`.'
            '\n************************************************************\n' %
            self._traj.v_name)

        # Loop until postprocessing no longer expands the trajectory
        while True:
            if self._multiproc:
                expanded_by_postproc = self._execute_multiprocessing(start_run_idx, results)
            else:
                # Create a generator to generate the tasks
                iterator = self._make_iterator(start_run_idx)

                n = start_run_idx
                total_runs = len(self._traj)
                # Signal start of progress calculation
                self._show_progress(n - 1, total_runs)
                for task in iterator:
                    result = _sigint_handling_single_run(task)
                    n = self._check_result_and_store_references(result, results,
                                                                n, total_runs)

            repeat = False
            if self._postproc is not None:
                self._logger.info('Performing POSTPROCESSING')
                repeat, start_run_idx, new_runs = self._execute_postproc(results)

            if not repeat:
                break
            else:
                expanded_by_postproc = True
                self._logger.info('POSTPROCESSING expanded the trajectory and added %d new runs' %
                                  new_runs)

        # Do some finalization
        self._traj._finalize(store_meta_data=True)

        self._logger.info(
            '\n************************************************************\n'
            'FINISHED all runs of trajectory\n`%s`.'
'\n************************************************************\n' % self._traj.v_name) if self._resumable and self._delete_resume: # We remove all resume files if the simulation was successfully completed shutil.rmtree(self._resume_path) if expanded_by_postproc: config_name = 'environment.%s.postproc_expand' % self.name if not self._traj.f_contains('config.' + config_name): self._traj.f_add_config(Parameter, config_name, True, comment='Added if trajectory was expanded ' 'by postprocessing.') def _get_results_from_queue(self, result_queue, results, n, total_runs): """Extract all available results from the queue and returns the increased n""" # Get all results from the result queue while not result_queue.empty(): result = result_queue.get() n = self._check_result_and_store_references(result, results, n, total_runs) return n def _check_result_and_store_references(self, result, results, n, total_runs): """Checks for SIGINT and if reference wrapping and stores references.""" if result[0] == sigint_handling.SIGINT: self._stop_iteration = True result = result[1] # If SIGINT result is a nested tuple if result is not None: if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL: self._multiproc_wrapper.store_references(result[2]) self._traj._update_run_information(result[1]) results.append(result[0]) if self._resumable: # [0:2] to not store references self._trigger_result_snapshot(result[0:2]) self._show_progress(n, total_runs) n += 1 return n def _trigger_result_snapshot(self, result): """ Triggers a snapshot of the results for continuing :param result: Currently computed result """ timestamp = result[1]['finish_timestamp'] timestamp_str = repr(timestamp).replace('.', '_') filename = 'result_%s' % timestamp_str extension = '.ncnt' dump_filename = os.path.join(self._resume_path, filename + extension) dump_file = open(dump_filename, 'wb') dill.dump(result, dump_file, protocol=2) dump_file.flush() dump_file.close() # We rename the file to be certain that the trajectory did not 
        # crash during taking
        # the snapshot!
        extension = '.rcnt'
        rename_filename = os.path.join(self._resume_path, filename + extension)
        shutil.move(dump_filename, rename_filename)

    def _execute_multiprocessing(self, start_run_idx, results):
        """Performs multiprocessing and signals expansion by postproc"""
        n = start_run_idx
        total_runs = len(self._traj)
        expanded_by_postproc = False

        if (self._wrap_mode == pypetconstants.WRAP_MODE_NONE or
                self._storage_service.multiproc_safe):
            self._logger.info('I assume that your storage service is multiprocessing safe.')
        else:
            # Wrap the storage service so that concurrent processes cannot corrupt it
            use_manager = (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
                           self._immediate_postproc)
            self._multiproc_wrapper = MultiprocContext(self._traj,
                                                       self._wrap_mode,
                                                       full_copy=None,
                                                       manager=None,
                                                       use_manager=use_manager,
                                                       lock=None,
                                                       queue=None,
                                                       queue_maxsize=self._queue_maxsize,
                                                       port=self._url,
                                                       timeout=self._timeout,
                                                       gc_interval=self._gc_interval,
                                                       log_config=self._logging_manager.log_config,
                                                       log_stdout=self._logging_manager.log_stdout,
                                                       graceful_exit=self._graceful_exit)
            self._multiproc_wrapper.start()
        try:
            if self._use_pool:
                self._logger.info('Starting Pool with %d processes' % self._ncores)
                if self._freeze_input:
                    self._logger.info('Freezing pool input')
                    init_kwargs = self._make_kwargs()
                    # To work under windows we must allow the full-copy now!
                    # Because windows does not support forking!
                    # Remember the original setting so it can be restored afterwards
                    pool_full_copy = self._traj.v_full_copy
                    self._traj.v_full_copy = True
                    initializer = _configure_frozen_pool
                    target = _frozen_pool_single_run
                else:
                    # We don't want to pickle the storage service
                    pool_service = self._traj.v_storage_service
                    self._traj.v_storage_service = None
                    init_kwargs = dict(logging_manager=self._logging_manager,
                                       storage_service=pool_service,
                                       niceness=self._niceness)
                    initializer = _configure_pool
                    target = _pool_single_run
                try:
                    iterator = self._make_iterator(start_run_idx)
                    mpool = multip.Pool(self._ncores, initializer=initializer,
                                        initargs=(init_kwargs,))
                    pool_results = mpool.imap(target, iterator)

                    # Signal start of progress calculation
                    self._show_progress(n - 1, total_runs)
                    for result in pool_results:
                        n = self._check_result_and_store_references(result, results,
                                                                    n, total_runs)

                    # Everything is done
                    mpool.close()
                    mpool.join()
                finally:
                    # Restore whatever was temporarily changed above
                    if self._freeze_input:
                        self._traj.v_full_copy = pool_full_copy
                    else:
                        self._traj.v_storage_service = pool_service
                self._logger.info('Pool has joined, will delete it.')
                del mpool
            elif self._use_scoop:
                self._logger.info('Starting SCOOP jobs')
                if self._freeze_input:
                    self._logger.info('Freezing SCOOP input')
                    if hasattr(_frozen_scoop_single_run, 'kwargs'):
                        self._logger.warning('You already did run an experiment with '
                                             'SCOOP and a frozen input. Frozen input '
                                             'is realized as a shared constant, so'
                                             'over time your memory might get bloated. '
                                             'If you experience trouble, '
                                             'restart your python interpreter and '
                                             'SCOOP.')
                    _frozen_scoop_single_run.kwargs = {}
                    scoop_full_copy = self._traj.v_full_copy
                    self._traj.v_full_copy = True
                    init_kwargs = self._make_kwargs()
                    # Unique key under which the frozen input is shared with SCOOP workers
                    scoop_rev = self.name + '_' + str(time.time()).replace('.','_')
                    shared.setConst(**{scoop_rev: init_kwargs})
                    iterator = self._make_iterator(start_run_idx,
                                                   copy_data=True,
                                                   scoop_rev=scoop_rev)
                    target = _frozen_scoop_single_run
                else:
                    iterator = self._make_iterator(start_run_idx, copy_data=True)
                    target = _scoop_single_run
                try:
                    if scoop.IS_RUNNING:
                        scoop_results = futures.map(target, iterator, timeout=self._timeout)
                    else:
                        self._logger.error('SCOOP is NOT running, I will use Python`s map '
                                           'function. To activate scoop, start your script via '
                                           '`python -m scoop your_script.py`.')
                        scoop_results = map(target, iterator)

                    # Signal start of progress calculation
                    self._show_progress(n - 1, total_runs)
                    for result in scoop_results:
                        n = self._check_result_and_store_references(result, results,
                                                                    n, total_runs)
                finally:
                    if self._freeze_input:
                        self._traj.v_full_copy = scoop_full_copy
            else:
                # If we spawn a single process for each run, we need an additional queue
                # for the results of `runfunc`
                if self._immediate_postproc:
                    maxsize = 0
                else:
                    maxsize = total_runs

                start_result_length = len(results)
                result_queue = multip.Queue(maxsize=maxsize)

                # Create a generator to generate the tasks for multiprocessing
                iterator = self._make_iterator(start_run_idx, result_queue=result_queue)

                self._logger.info('Starting multiprocessing with at most '
                                  '%d processes running at the same time.' % self._ncores)

                if self._check_usage:
                    self._logger.info(
                        'Monitoring usage statistics. I will not spawn new processes '
                        'if one of the following cap thresholds is crossed, '
                        'CPU: %.1f %%, RAM: %.1f %%, Swap: %.1f %%.'
                        % (self._cpu_cap, self._memory_cap[0], self._swap_cap))

                keep_running = True  # Evaluates to false if trajectory produces
                # no more single runs
                process_dict = {}  # Dict containing all subprocees

                # For the cap values, we lazily evaluate them
                cpu_usage_func = lambda: self._estimate_cpu_utilization()
                memory_usage_func = lambda: self._estimate_memory_utilization(process_dict)
                swap_usage_func = lambda: psutil.swap_memory().percent
                signal_cap = True  # If True cap warning is emitted
                max_signals = 10  # Maximum number of warnings, after that warnings are
                # no longer signaled

                # Signal start of progress calculation
                self._show_progress(n - 1, total_runs)

                while len(process_dict) > 0 or keep_running:
                    # First check if some processes did finish their job
                    for pid in compat.listkeys(process_dict):
                        proc = process_dict[pid]

                        # Delete the terminated processes
                        if not proc.is_alive():
                            proc.join()
                            del process_dict[pid]
                            del proc

                    # Check if caps are reached.
                    # Cap is only checked if there is at least one
                    # process working to prevent deadlock.
                    no_cap = True
                    if self._check_usage and self._ncores > len(process_dict) > 0:
                        for cap_name, cap_function, threshold in (
                                ('CPU Cap', cpu_usage_func, self._cpu_cap),
                                ('Memory Cap', memory_usage_func, self._memory_cap[0]),
                                ('Swap Cap', swap_usage_func, self._swap_cap)):
                            cap_value = cap_function()
                            if cap_value > threshold:
                                no_cap = False
                                if signal_cap:
                                    if cap_name == 'Memory Cap':
                                        add_on_str = ' [including estimate]'
                                    else:
                                        add_on_str = ''
                                    self._logger.warning('Could not start next process '
                                                         'immediately [currently running '
                                                         '%d process(es)]. '
                                                         '%s reached, '
                                                         '%.1f%% >= %.1f%%%s.' %
                                                         (len(process_dict), cap_name,
                                                          cap_value, threshold, add_on_str))
                                    signal_cap = False
                                    max_signals -= 1
                                    if max_signals == 0:
                                        self._logger.warning('Maximum number of cap warnings '
                                                             'reached. '
                                                             'I will no longer '
                                                             'notify about cap violations, '
                                                             'but cap values are still applied '
                                                             'silently in background.')
                                break  # If one cap value is reached we can skip the rest

                    # If we have less active processes than
                    # self._ncores and there is still
                    # a job to do, add another process
                    if len(process_dict) < self._ncores and keep_running and no_cap:
                        try:
                            task = next(iterator)
                            proc = multip.Process(target=_process_single_run,
                                                  args=(task,))
                            proc.start()
                            process_dict[proc.pid] = proc
                            signal_cap = max_signals > 0  # Only signal max_signals times
                        except StopIteration:
                            # All simulation runs have been started
                            keep_running = False
                            if self._postproc is not None and self._immediate_postproc:
                                if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
                                    # Postproc must use the real storage service,
                                    # not the reference wrapper
                                    reference_service = self._traj._storage_service
                                    self._traj.v_storage_service = self._storage_service
                                try:
                                    self._logger.info('Performing IMMEDIATE POSTPROCESSING.')
                                    keep_running, start_run_idx, new_runs = \
                                        self._execute_postproc(results)
                                finally:
                                    if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
                                        self._traj._storage_service = reference_service
                                if keep_running:
                                    expanded_by_postproc = True
                                    self._logger.info('IMMEDIATE POSTPROCESSING expanded '
                                                      'the trajectory and added %d '
                                                      'new runs' % new_runs)
                                    n = start_run_idx
                                    total_runs = len(self._traj)
                                    iterator = self._make_iterator(start_run_idx,
                                                                   result_queue=result_queue)
                            if not keep_running:
                                self._logger.debug('All simulation runs have been started. '
                                                   'No new runs will be started. '
                                                   'The simulation will finish after the still '
                                                   'active runs completed.')
                    else:
                        # Give the workers a tiny time slice before polling again
                        time.sleep(0.001)

                    # Get all results from the result queue
                    n = self._get_results_from_queue(result_queue, results, n, total_runs)

                # Finally get all results from the result queue once more and finalize the queue
                self._get_results_from_queue(result_queue, results, n, total_runs)
                result_queue.close()
                result_queue.join_thread()
                del result_queue

                result_sort(results, start_result_length)
        finally:
            # Finalize the wrapper
            if self._multiproc_wrapper is not None:
                self._multiproc_wrapper.finalize()
                self._multiproc_wrapper = None

        return expanded_by_postproc


@prefix_naming
class MultiprocContext(HasLogger):
    """ A lightweight environment that allows the usage of multiprocessing.

    Can be used if you don't want a full-blown :class:`~pypet.environment.Environment` to
    enable multiprocessing or if you want to implement your own custom multiprocessing.

    This Wrapper tool will take a trajectory container and take care that the storage
    service is multiprocessing safe. Supports the ``'LOCK'`` as well as the ``'QUEUE'`` mode.
    In case of the latter an extra queue process is created if desired.
    This process will handle all storage requests and write data to the hdf5 file.

    Note that in case of ``'QUEUE'`` wrapping data can only be stored not loaded, because
    the queue will only be read in one direction.

    :param trajectory:

        The trajectory which storage service should be wrapped

    :param wrap_mode:

        There are four options:

        :const:`~pypet.pypetconstants.WRAP_MODE_QUEUE`: ('QUEUE')

        If desired another process for storing the trajectory is spawned.
        The sub processes running the individual trajectories will add their results to a
        multiprocessing queue that is handled by an additional process.
        Note that this requires additional memory since data
        will be pickled and send over the queue for storage!

        :const:`~pypet.pypetconstants.WRAP_MODE_LOCK`: ('LOCK')

        Each individual process takes care about storage by itself.
        Before carrying out the storage, a lock is placed to prevent the other processes
        to store data. Accordingly, sometimes this leads to a lot of processes
        waiting until the lock is released.
        Yet, data does not need to be pickled before storage!

        :const:`~pypet.pypetconstants.WRAP_MODE_PIPE`: ('PIPE')

        Experimental mode based on a single pipe. Is faster than ``'QUEUE'`` wrapping
        but data corruption may occur, does not work under Windows
        (since it relies on forking).

        :const:`~pypet.pypetconstants.WRAP_MODE_LOCAL` ('LOCAL')

        Data is not stored in spawned child processes, but data needs to be
        returned manually in terms of *references* dictionaries
        (the ``reference`` property of the ``ReferenceWrapper`` class).

        Storing is only performed in the main process.

        Note that removing data during a single run has no longer an effect on memory
        whatsoever, because there are references kept for all data
        that is supposed to be stored.

    :param full_copy:

        In case the trajectory gets pickled (sending over a queue or a pool of processors)
        if the full trajectory should be copied each time (i.e. all parameter points) or
        only a particular point. A particular point can be chosen beforehand with
        :func:`~pypet.trajectory.Trajectory.f_set_crun`.

        Leave ``full_copy=None`` if the setting from the passed trajectory should be used.
        Otherwise ``v_full_copy`` of the trajectory is changed to your chosen value.

    :param manager:

        You can pass an optional multiprocessing manager here,
        if you already have instantiated one.
        Leave ``None`` if you want the wrapper to create one.

    :param use_manager:

        If your lock and queue should be created with a manager or if wrapping should be
        created from the multiprocessing module directly.

        For example: ``multiprocessing.Lock()`` or
        via a manager ``multiprocessing.Manager().Lock()``
        (if you specified a manager, this manager will be used).

        The former is usually faster whereas the latter is more flexible and can
        be used in an environment where fork is not available, for instance.
    :param lock:

        You can pass a multiprocessing lock here, if you already have instantiated one.
        Leave ``None`` if you want the wrapper to create one in case of ``'LOCK'`` wrapping.

    :param queue:

        You can pass a multiprocessing queue here, if you already instantiated one.
        Leave ``None`` if you want the wrapper to create one in case of ``'QUEUE'`` wrapping.

    :param queue_maxsize:

        Maximum size of queue if created new. 0 means infinite.

    :param port:

        Port to be used by lock server in case of ``'NETLOCK'`` wrapping.
        Can be a single integer as well as a tuple ``(7777, 9999)`` to specify
        a range of ports from which to pick a random one.
        Leave `None` for using pyzmq's default range.
        In case automatic determining of the host's ip address fails,
        you can also pass the full address (including the protocol and
        the port) of the host in the network like ``'tcp://127.0.0.1:7777'``.

    :param timeout:

        Timeout for a NETLOCK wrapping in seconds. After ``timeout``
        seconds a lock is automatically released and free for other processes.

    :param gc_interval:

        Interval (in runs or storage operations) with which ``gc.collect()``
        should be called in case of the ``'LOCAL'``, ``'QUEUE'``, or ``'PIPE'`` wrapping.
        Leave ``None`` for never.

        ``1`` means after every storing, ``2`` after every second storing, and so on.
        Only calls ``gc.collect()`` in the main (if ``'LOCAL'`` wrapping)
        or the queue/pipe process. If you need to garbage collect data within your single runs,
        you need to manually call ``gc.collect()``.

        Usually, there is no need to set this parameter since the Python garbage collection
        works quite nicely and schedules collection automatically.

    :param log_config:

        Path to logging config file or dictionary to configure logging for the
        spawned queue process. Thus, only considered if the queue wrap mode is chosen.

    :param log_stdout:

        If stdout of the queue process should also be logged.

    :param graceful_exit:

        Hitting Ctrl+C won't kill a server process unless hit twice.
    For a usage example see :ref:`example-16`.

    """
    def __init__(self, trajectory,
                 wrap_mode=pypetconstants.WRAP_MODE_LOCK,
                 full_copy=None,
                 manager=None,
                 use_manager=True,
                 lock=None,
                 queue=None,
                 queue_maxsize=0,
                 port=None,
                 timeout=None,
                 gc_interval=None,
                 log_config=None,
                 log_stdout=False,
                 graceful_exit=False):
        self._set_logger()
        self._manager = manager
        self._traj = trajectory
        self._storage_service = self._traj.v_storage_service
        self._queue_process = None
        self._pipe_process = None
        self._lock_wrapper = None
        self._queue_wrapper = None
        self._reference_wrapper = None
        self._wrap_mode = wrap_mode
        self._queue = queue
        self._queue_maxsize = queue_maxsize
        # NOTE(review): the `queue` argument doubles as the pipe and
        # `queue_maxsize` as the pipe's buffer size — looks intentional
        # (one shared parameter for both transports) but worth confirming
        self._pipe = queue
        self._max_buffer_size = queue_maxsize
        self._lock = lock
        self._lock_process = None
        self._port = port
        self._timeout = timeout
        self._use_manager = use_manager
        self._logging_manager = None
        self._gc_interval = gc_interval
        self._graceful_exit = graceful_exit

        if (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
                self._wrap_mode == pypetconstants.WRAP_MODE_PIPE or
                self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK or
                self._wrap_mode == pypetconstants.WRAP_MODE_NETQUEUE):
            # These modes spawn an extra process, which needs its own logging setup
            self._logging_manager = LoggingManager(log_config=log_config,
                                                   log_stdout=log_stdout)
            self._logging_manager.extract_replacements(self._traj)
            self._logging_manager.check_log_config()

        if full_copy is not None:
            self._traj.v_full_copy=full_copy

    @property
    def lock(self):
        return self._lock

    @property
    def queue(self):
        return self._queue

    @property
    def pipe(self):
        return self._pipe

    @property
    def queue_wrapper(self):
        return self._queue_wrapper

    @property
    def reference_wrapper(self):
        return self._reference_wrapper

    @property
    def lock_wrapper(self):
        return self._lock_wrapper

    @property
    def pipe_wrapper(self):
        return self._pipe_wrapper

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.finalize()

    def store_references(self, references):
        """In case of reference wrapping, stores data.
:param references: References dictionary from a ReferenceWrapper. :param gc_collect: If ``gc.collect`` should be called. :param n: Alternatively if ``gc_interval`` is set, a current index can be passed. Data is stored in case ``n % gc_interval == 0``. """ self._reference_store.store_references(references) def start(self): """Starts the multiprocess wrapping. Automatically called when used as context manager. """ self._do_wrap() def _do_wrap(self): """ Wraps a Storage Service """ # First take care that the storage is initialised self._traj.f_store(only_init=True) if self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE: self._prepare_queue() elif self._wrap_mode == pypetconstants.WRAP_MODE_LOCK: self._prepare_lock() elif self._wrap_mode == pypetconstants.WRAP_MODE_PIPE: self._prepare_pipe() elif self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL: self._prepare_local() elif self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK: self._prepare_netlock() elif self._wrap_mode == pypetconstants.WRAP_MODE_NETQUEUE: self._prepare_netqueue() else: raise RuntimeError('The mutliprocessing mode %s, your choice is ' 'not supported, use %s`, `%s`, %s, `%s`, or `%s`.' 
                               % (self._wrap_mode, pypetconstants.WRAP_MODE_QUEUE,
                                  pypetconstants.WRAP_MODE_LOCK,
                                  pypetconstants.WRAP_MODE_PIPE,
                                  pypetconstants.WRAP_MODE_LOCAL,
                                  pypetconstants.WRAP_MODE_NETLOCK))

    def _prepare_local(self):
        # Replace the trajectory's service with a reference wrapper; actual
        # storing happens later in the main process via `store_references`
        reference_wrapper = ReferenceWrapper()
        self._traj.v_storage_service = reference_wrapper
        self._reference_wrapper = reference_wrapper
        self._reference_store = ReferenceStore(self._storage_service, self._gc_interval)

    def _prepare_netlock(self):
        """ Replaces the trajectory's service with a LockWrapper """
        if not isinstance(self._port, compat.base_type):
            url = port_to_tcp(self._port)
            self._logger.info('Determined Server URL: `%s`' % url)
        else:
            url = self._port

        if self._lock is None:
            if hasattr(os, 'fork'):
                self._lock = ForkAwareLockerClient(url)
            else:
                self._lock = LockerClient(url)

        if self._timeout is None:
            lock_server = LockerServer(url)
        else:
            lock_server = TimeOutLockerServer(url, self._timeout)
            self._logger.info('Using timeout aware lock server.')

        self._lock_process = multip.Process(name='LockServer', target=_wrap_handling,
                                            args=(dict(handler=lock_server,
                                                       logging_manager=self._logging_manager,
                                                       graceful_exit=self._graceful_exit),))
        # self._lock_process = threading.Thread(name='LockServer', target=_wrap_handling,
        #                                       args=(dict(handler=lock_server,
        #                                                  logging_manager=self._logging_manager),))
        self._lock_process.start()
        self._lock.start()

        # Wrap around the storage service to allow the placement of locks around
        # the storage procedure.
        lock_wrapper = LockWrapper(self._storage_service, self._lock)
        self._traj.v_storage_service = lock_wrapper
        self._lock_wrapper = lock_wrapper

    def _prepare_lock(self):
        """ Replaces the trajectory's service with a LockWrapper """
        if self._lock is None:
            if self._use_manager:
                if self._manager is None:
                    self._manager = multip.Manager()
                # We need a lock that is shared by all processes.
                self._lock = self._manager.Lock()
            else:
                self._lock = multip.Lock()

        # Wrap around the storage service to allow the placement of locks around
        # the storage procedure.
        lock_wrapper = LockWrapper(self._storage_service, self._lock)
        self._traj.v_storage_service = lock_wrapper
        self._lock_wrapper = lock_wrapper

    def _prepare_pipe(self):
        """ Replaces the trajectory's service with a pipe sender and starts the pipe process.
        """
        if self._pipe is None:
            self._pipe = multip.Pipe(True)
        if self._lock is None:
            self._lock = multip.Lock()

        self._logger.info('Starting the Storage Pipe!')
        # Wrap a pipe writer around the storage service
        pipe_handler = PipeStorageServiceWriter(self._storage_service, self._pipe[0],
                                                max_buffer_size=self._max_buffer_size)

        # Start the pipe process
        self._pipe_process = multip.Process(name='PipeProcess', target=_wrap_handling,
                                            args=(dict(handler=pipe_handler,
                                                       logging_manager=self._logging_manager,
                                                       graceful_exit=self._graceful_exit),))
        self._pipe_process.start()

        # Replace the storage service of the trajectory by a sender.
        # The sender will put all data onto the pipe.
        # The writer from above will receive the data from
        # the pipe and hand it over to
        # the storage service
        self._pipe_wrapper = PipeStorageServiceSender(self._pipe[1], self._lock)
        self._traj.v_storage_service = self._pipe_wrapper

    def _prepare_queue(self):
        """ Replaces the trajectory's service with a queue sender and starts the queue process.
""" if self._queue is None: if self._use_manager: if self._manager is None: self._manager = multip.Manager() self._queue = self._manager.Queue(maxsize=self._queue_maxsize) else: self._queue = multip.Queue(maxsize=self._queue_maxsize) self._logger.info('Starting the Storage Queue!') # Wrap a queue writer around the storage service queue_handler = QueueStorageServiceWriter(self._storage_service, self._queue, self._gc_interval) # Start the queue process self._queue_process = multip.Process(name='QueueProcess', target=_wrap_handling, args=(dict(handler=queue_handler, logging_manager=self._logging_manager, graceful_exit=self._graceful_exit),)) self._queue_process.start() # Replace the storage service of the trajectory by a sender. # The sender will put all data onto the queue. # The writer from above will receive the data from # the queue and hand it over to # the storage service self._queue_wrapper = QueueStorageServiceSender(self._queue) self._traj.v_storage_service = self._queue_wrapper def _prepare_netqueue(self): """ Replaces the trajectory's service with a queue sender and starts the queue process. """ self._logger.info('Starting Network Queue!') if not isinstance(self._port, compat.base_type): url = port_to_tcp(self._port) self._logger.info('Determined Server URL: `%s`' % url) else: url = self._port if self._queue is None: if hasattr(os, 'fork'): self._queue = ForkAwareQueuingClient(url) else: self._queue = QueuingClient(url) # Wrap a queue writer around the storage service queuing_server_handler = QueuingServer(url, self._storage_service, self._queue_maxsize, self._gc_interval) # Start the queue process self._queue_process = multip.Process(name='QueuingServerProcess', target=_wrap_handling, args=(dict(handler=queuing_server_handler, logging_manager=self._logging_manager, graceful_exit=self._graceful_exit),)) self._queue_process.start() self._queue.start() # Replace the storage service of the trajectory by a sender. # The sender will put all data onto the queue. 
# The writer from above will receive the data from # the queue and hand it over to # the storage service self._queue_wrapper = QueueStorageServiceSender(self._queue) self._traj.v_storage_service = self._queue_wrapper def finalize(self): """ Restores the original storage service. If a queue process and a manager were used both are shut down. Automatically called when used as context manager. """ if (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE and self._queue_process is not None): self._logger.info('The Storage Queue will no longer accept new data. ' 'Hang in there for a little while. ' 'There still might be some data in the queue that ' 'needs to be stored.') # We might have passed the queue implicitly, # to be sure we add the queue here again self._traj.v_storage_service.queue = self._queue self._traj.v_storage_service.send_done() self._queue_process.join() if hasattr(self._queue, 'join'): self._queue.join() if hasattr(self._queue, 'close'): self._queue.close() if hasattr(self._queue, 'join_thread'): self._queue.join_thread() self._logger.info('The Storage Queue has joined.') elif (self._wrap_mode == pypetconstants.WRAP_MODE_PIPE and self._pipe_process is not None): self._logger.info('The Storage Pipe will no longer accept new data. ' 'Hang in there for a little while. 
' 'There still might be some data in the pipe that ' 'needs to be stored.') self._traj.v_storage_service.conn = self._pipe[1] self._traj.v_storage_service.send_done() self._pipe_process.join() self._pipe[1].close() self._pipe[0].close() elif (self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK and self._lock_process is not None): self._lock.send_done() self._lock.finalize() self._lock_process.join() elif (self._wrap_mode == pypetconstants.WRAP_MODE_NETQUEUE and self._queue_process is not None): self._queue.send_done() self._queue.finalize() self._queue_process.join() if self._manager is not None: self._manager.shutdown() self._manager = None self._queue_process = None self._queue = None self._queue_wrapper = None self._lock = None self._lock_wrapper = None self._lock_process = None self._reference_wrapper = None self._pipe = None self._pipe_process = None self._pipe_wrapper = None self._logging_manager = None self._traj._storage_service = self._storage_service def __del__(self): self.finalize()
nigroup/pypet
pypet/environment.py
Python
bsd-3-clause
148,936
[ "Brian" ]
71c281975617c32464bc375f839001fc4acbed4993cbd536dc6eacc3eda39bd0
#!/usr/bin/env python3 from setuptools import setup setup( name='mir.msmtpq', version='1.0.1', description='Message queue for msmtp', long_description='', keywords='', url='https://github.com/darkfeline/mir.msmtpq', author='Allen Li', author_email='darkfeline@felesatra.moe', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.6', ], packages=['mir.msmtpq'], entry_points={ 'console_scripts': [ 'msmtpq = mir.msmtpq.__main__:main', ], }, )
darkfeline/pymsmtpq
setup.py
Python
apache-2.0
732
[ "MOE" ]
33d9668604944ef89ade23820ff54626f3c0ba564cf6326fbbd84b0dfaca31bc
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# BOOLEANS_TRUE = ['y', 'yes', 'on', '1', 'true', 1, True] BOOLEANS_FALSE = ['n', 'no', 'off', '0', 'false', 0, False] BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE SIZE_RANGES = { 'Y': 1<<80, 'Z': 1<<70, 'E': 1<<60, 'P': 1<<50, 'T': 1<<40, 'G': 1<<30, 'M': 1<<20, 'K': 1<<10, 'B': 1 } FILE_ATTRIBUTES = { 'A': 'noatime', 'a': 'append', 'c': 'compressed', 'C': 'nocow', 'd': 'nodump', 'D': 'dirsync', 'e': 'extents', 'E': 'encrypted', 'h': 'blocksize', 'i': 'immutable', 'I': 'indexed', 'j': 'journalled', 'N': 'inline', 's': 'zero', 'S': 'synchronous', 't': 'notail', 'T': 'blockroot', 'u': 'undelete', 'X': 'compressedraw', 'Z': 'compresseddirty', } # ansible modules can be written in any language. To simplify # development of Python modules, the functions available here can # be used to do many common tasks import locale import os import re import pipes import shlex import subprocess import sys import types import time import select import shutil import stat import tempfile import traceback import grp import pwd import platform import errno import datetime from itertools import repeat, chain try: import syslog HAS_SYSLOG=True except ImportError: HAS_SYSLOG=False try: from systemd import journal has_journal = True except ImportError: has_journal = False HAVE_SELINUX=False try: import selinux HAVE_SELINUX=True except ImportError: pass # Python2 & 3 way to get NoneType NoneType = type(None) try: from collections import Sequence, Mapping except ImportError: # python2.5 Sequence = (list, tuple) Mapping = (dict,) # Note: When getting Sequence from collections, it matches with strings. 
If # this matters, make sure to check for strings before checking for sequencetype try: from collections.abc import KeysView SEQUENCETYPE = (Sequence, KeysView) except: SEQUENCETYPE = Sequence try: import json # Detect the python-json library which is incompatible # Look for simplejson if that's the case try: if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType): raise ImportError except AttributeError: raise ImportError except ImportError: try: import simplejson as json except ImportError: print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}') sys.exit(1) except SyntaxError: print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}') sys.exit(1) AVAILABLE_HASH_ALGORITHMS = dict() try: import hashlib # python 2.7.9+ and 2.7.0+ for attribute in ('available_algorithms', 'algorithms'): algorithms = getattr(hashlib, attribute, None) if algorithms: break if algorithms is None: # python 2.5+ algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') for algorithm in algorithms: AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm) except ImportError: import sha AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha} try: import md5 AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5 except ImportError: pass from ansible.module_utils.pycompat24 import get_exception, literal_eval from ansible.module_utils.six import (PY2, PY3, b, binary_type, integer_types, iteritems, text_type, string_types) from ansible.module_utils.six.moves import map, reduce from ansible.module_utils._text import to_native, to_bytes, to_text PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I) _NUMBERTYPES = tuple(list(integer_types) + [float]) # Deprecated compat. 
Only kept in case another module used these names Using # ansible.module_utils.six is preferred NUMBERTYPES = _NUMBERTYPES imap = map try: # Python 2 unicode except NameError: # Python 3 unicode = text_type try: # Python 2.6+ bytes except NameError: # Python 2.4 bytes = binary_type try: # Python 2 basestring except NameError: # Python 3 basestring = string_types _literal_eval = literal_eval # End of deprecated names # Internal global holding passed in params. This is consulted in case # multiple AnsibleModules are created. Otherwise each AnsibleModule would # attempt to read from stdin. Other code should not use this directly as it # is an internal implementation detail _ANSIBLE_ARGS = None FILE_COMMON_ARGUMENTS=dict( src = dict(), mode = dict(type='raw'), owner = dict(), group = dict(), seuser = dict(), serole = dict(), selevel = dict(), setype = dict(), follow = dict(type='bool', default=False), # not taken by the file module, but other modules call file so it must ignore them. content = dict(no_log=True), backup = dict(), force = dict(), remote_src = dict(), # used by assemble regexp = dict(), # used by assemble delimiter = dict(), # used by assemble directory_mode = dict(), # used by copy unsafe_writes = dict(type='bool'), # should be available to any module using atomic_move attributes = dict(aliases=['attr']), ) PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?') # Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4 PERM_BITS = int('07777', 8) # file mode permission bits EXEC_PERM_BITS = int('00111', 8) # execute permission bits DEFAULT_PERM = int('0666', 8) # default file permission bits def get_platform(): ''' what's the platform? example: Linux is a platform. 
''' return platform.system() def get_distribution(): ''' return the distribution name ''' if platform.system() == 'Linux': try: supported_dists = platform._supported_dists + ('arch','alpine') distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize() if not distribution and os.path.isfile('/etc/system-release'): distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize() if 'Amazon' in distribution: distribution = 'Amazon' else: distribution = 'OtherLinux' except: # FIXME: MethodMissing, I assume? distribution = platform.dist()[0].capitalize() else: distribution = None return distribution def get_distribution_version(): ''' return the distribution version ''' if platform.system() == 'Linux': try: distribution_version = platform.linux_distribution()[1] if not distribution_version and os.path.isfile('/etc/system-release'): distribution_version = platform.linux_distribution(supported_dists=['system'])[1] except: # FIXME: MethodMissing, I assume? distribution_version = platform.dist()[1] else: distribution_version = None return distribution_version def get_all_subclasses(cls): ''' used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class. __subclasses__ return only direct sub classes. This one go down into the class tree. ''' # Retrieve direct subclasses subclasses = cls.__subclasses__() to_visit = list(subclasses) # Then visit all subclasses while to_visit: for sc in to_visit: # The current class is now visited, so remove it from list to_visit.remove(sc) # Appending all subclasses to visit and keep a reference of available class for ssc in sc.__subclasses__(): subclasses.append(ssc) to_visit.append(ssc) return subclasses def load_platform_subclass(cls, *args, **kwargs): ''' used by modules like User to have different implementations based on detected platform. See User module for an example. 
''' this_platform = get_platform() distribution = get_distribution() subclass = None # get the most specific superclass for this platform if distribution is not None: for sc in get_all_subclasses(cls): if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform: subclass = sc if subclass is None: for sc in get_all_subclasses(cls): if sc.platform == this_platform and sc.distribution is None: subclass = sc if subclass is None: subclass = cls return super(cls, subclass).__new__(subclass) def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, and dict container types (the containers that the json module returns) ''' if isinstance(d, text_type): return to_bytes(d, encoding=encoding, errors=errors) elif isinstance(d, dict): return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors))) elif isinstance(d, list): return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors))) elif isinstance(d, tuple): return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors))) else: return d def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, and dict container types (the containers that the json module returns) ''' if isinstance(d, binary_type): # Warning, can traceback return to_text(d, encoding=encoding, errors=errors) elif isinstance(d, dict): return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors))) elif isinstance(d, list): return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors))) elif isinstance(d, tuple): return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors))) else: return d def 
return_values(obj): """ Return native stringified values from datastructures. For use with removing sensitive values pre-jsonification.""" if isinstance(obj, (text_type, binary_type)): if obj: yield to_native(obj, errors='surrogate_or_strict') return elif isinstance(obj, SEQUENCETYPE): for element in obj: for subelement in return_values(element): yield subelement elif isinstance(obj, Mapping): for element in obj.items(): for subelement in return_values(element[1]): yield subelement elif isinstance(obj, (bool, NoneType)): # This must come before int because bools are also ints return elif isinstance(obj, NUMBERTYPES): yield to_native(obj, nonstring='simplerepr') else: raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj)) def remove_values(value, no_log_strings): """ Remove strings in no_log_strings from value. If value is a container type, then remove a lot more""" if isinstance(value, (text_type, binary_type)): # Need native str type native_str_value = value if isinstance(value, text_type): value_is_text = True if PY2: native_str_value = to_bytes(value, encoding='utf-8', errors='surrogate_or_strict') elif isinstance(value, binary_type): value_is_text = False if PY3: native_str_value = to_text(value, encoding='utf-8', errors='surrogate_or_strict') if native_str_value in no_log_strings: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' for omit_me in no_log_strings: native_str_value = native_str_value.replace(omit_me, '*' * 8) if value_is_text and isinstance(native_str_value, binary_type): value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace') elif not value_is_text and isinstance(native_str_value, text_type): value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace') else: value = native_str_value elif isinstance(value, SEQUENCETYPE): return [remove_values(elem, no_log_strings) for elem in value] elif isinstance(value, Mapping): return dict((k, remove_values(v, no_log_strings)) for k, v in 
value.items()) elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))): stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict') if stringy_value in no_log_strings: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' for omit_me in no_log_strings: if omit_me in stringy_value: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' elif isinstance(value, datetime.datetime): value = value.isoformat() else: raise TypeError('Value of unknown type: %s, %s' % (type(value), value)) return value def heuristic_log_sanitize(data, no_log_values=None): ''' Remove strings that look like passwords from log messages ''' # Currently filters: # user:pass@foo/whatever and http://username:pass@wherever/foo # This code has false positives and consumes parts of logs that are # not passwds # begin: start of a passwd containing string # end: end of a passwd containing string # sep: char between user and passwd # prev_begin: where in the overall string to start a search for # a passwd # sep_search_end: where in the string to end a search for the sep data = to_native(data) output = [] begin = len(data) prev_begin = begin sep = 1 while sep: # Find the potential end of a passwd try: end = data.rindex('@', 0, begin) except ValueError: # No passwd in the rest of the data output.insert(0, data[0:begin]) break # Search for the beginning of a passwd sep = None sep_search_end = end while not sep: # URL-style username+password try: begin = data.rindex('://', 0, sep_search_end) except ValueError: # No url style in the data, check for ssh style in the # rest of the string begin = 0 # Search for separator try: sep = data.index(':', begin + 3, end) except ValueError: # No separator; choices: if begin == 0: # Searched the whole string so there's no password # here. Return the remaining data output.insert(0, data[0:begin]) break # Search for a different beginning of the password field. sep_search_end = begin continue if sep: # Password was found; remove it. 
output.insert(0, data[end:prev_begin]) output.insert(0, '********') output.insert(0, data[begin:sep + 1]) prev_begin = begin output = ''.join(output) if no_log_values: output = remove_values(output, no_log_values) return output def bytes_to_human(size, isbits=False, unit=None): base = 'Bytes' if isbits: base = 'bits' suffix = '' for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]): if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]: break if limit != 1: suffix += base[0] else: suffix = base return '%.2f %s' % (float(size)/ limit, suffix) def human_to_bytes(number, default_unit=None, isbits=False): ''' Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument ex: human_to_bytes('10M') <=> human_to_bytes(10, 'M') ''' m = re.search('^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE) if m is None: raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number)) try: num = float(m.group(1)) except: raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number)) unit = m.group(2) if unit is None: unit = default_unit if unit is None: ''' No unit given, returning raw number ''' return int(round(num)) range_key = unit[0].upper() try: limit = SIZE_RANGES[range_key] except: raise ValueError("human_to_bytes() failed to convert %s (unit = %s). 
The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys()))) # default value unit_class = 'B' unit_class_name = 'byte' # handling bits case if isbits: unit_class = 'b' unit_class_name = 'bit' # check unit value if more than one character (KB, MB) if len(unit) > 1: expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key) if range_key == 'B': expect_message = 'expect %s or %s' % (unit_class, unit_class_name) if unit_class_name in unit.lower(): pass elif unit[1] != unit_class: raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message)) return int(round(num * limit)) def is_executable(path): '''is the given path executable? Limitations: * Does not account for FSACLs. * Most times we really want to know "Can the current user execute this file" This function does not tell us that, only if an execute bit is set. ''' # These are all bitfields so first bitwise-or all the permissions we're # looking for, then bitwise-and with the file's mode to determine if any # execute bits are set. return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE]) def _load_params(): ''' read the modules parameters and store them globally. This function may be needed for certain very dynamic custom modules which want to process the parameters that are being handed the module. Since this is so closely tied to the implementation of modules we cannot guarantee API stability for it (it may change between versions) however we will try not to break it gratuitously. It is certainly more future-proof to call this function and consume its outputs than to implement the logic inside it as a copy in your own code. 
''' global _ANSIBLE_ARGS if _ANSIBLE_ARGS is not None: buffer = _ANSIBLE_ARGS else: # debug overrides to read args from file or cmdline # Avoid tracebacks when locale is non-utf8 # We control the args and we pass them as utf8 if len(sys.argv) > 1: if os.path.isfile(sys.argv[1]): fd = open(sys.argv[1], 'rb') buffer = fd.read() fd.close() else: buffer = sys.argv[1] if PY3: buffer = buffer.encode('utf-8', errors='surrogateescape') # default case, read from stdin else: if PY2: buffer = sys.stdin.read() else: buffer = sys.stdin.buffer.read() _ANSIBLE_ARGS = buffer try: params = json.loads(buffer.decode('utf-8')) except ValueError: # This helper used too early for fail_json to work. print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}') sys.exit(1) if PY2: params = json_dict_unicode_to_bytes(params) try: return params['ANSIBLE_MODULE_ARGS'] except KeyError: # This helper does not have access to fail_json so we have to print # json output on our own. print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}') sys.exit(1) def env_fallback(*args, **kwargs): ''' Load value from environment ''' for arg in args: if arg in os.environ: return os.environ[arg] else: raise AnsibleFallbackNotFound def _lenient_lowercase(lst): """Lowercase elements of a list. If an element is not a string, pass it through untouched. 
""" lowered = [] for value in lst: try: lowered.append(value.lower()) except AttributeError: lowered.append(value) return lowered def format_attributes(attributes): attribute_list = [] for attr in attributes: if attr in FILE_ATTRIBUTES: attribute_list.append(FILE_ATTRIBUTES[attr]) return attribute_list def get_flags_from_attributes(attributes): flags = [] for key,attr in FILE_ATTRIBUTES.items(): if attr in attributes: flags.append(key) return ''.join(flags) class AnsibleFallbackNotFound(Exception): pass class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True, mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False, supports_check_mode=False, required_if=None): ''' common code for quickly building an ansible module in Python (although you can write modules in anything that can return JSON) see library/* for examples ''' self._name = os.path.basename(__file__) #initialize name until we can parse from options self.argument_spec = argument_spec self.supports_check_mode = supports_check_mode self.check_mode = False self.no_log = no_log self.cleanup_files = [] self._debug = False self._diff = False self._socket_path = None self._verbosity = 0 # May be used to set modifications to the environment for any # run_command invocation self.run_command_environ_update = {} self._warnings = [] self._deprecations = [] self.aliases = {} self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility', '_ansible_socket'] if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): if k not in self.argument_spec: self.argument_spec[k] = v self._load_params() self._set_fallbacks() # append to legal_inputs and then possibly check against them try: self.aliases = self._handle_aliases() except Exception: e = 
get_exception() # Use exceptions here because it isn't safe to call fail_json until no_log is processed print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e)) sys.exit(1) # Save parameter values that should never be logged self.no_log_values = set() # Use the argspec to determine which args are no_log for arg_name, arg_opts in self.argument_spec.items(): if arg_opts.get('no_log', False): # Find the value for the no_log'd param no_log_object = self.params.get(arg_name, None) if no_log_object: self.no_log_values.update(return_values(no_log_object)) if arg_opts.get('removed_in_version') is not None and arg_name in self.params: self._deprecations.append({ 'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name, 'version': arg_opts.get('removed_in_version') }) # check the locale as set by the current environment, and reset to # a known valid (LANG=C) if it's an invalid/unavailable locale self._check_locale() self._check_arguments(check_invalid_arguments) # check exclusive early if not bypass_checks: self._check_mutually_exclusive(mutually_exclusive) self._set_defaults(pre=True) self._CHECK_ARGUMENT_TYPES_DISPATCHER = { 'str': self._check_type_str, 'list': self._check_type_list, 'dict': self._check_type_dict, 'bool': self._check_type_bool, 'int': self._check_type_int, 'float': self._check_type_float, 'path': self._check_type_path, 'raw': self._check_type_raw, 'jsonarg': self._check_type_jsonarg, 'json': self._check_type_jsonarg, 'bytes': self._check_type_bytes, 'bits': self._check_type_bits, } if not bypass_checks: self._check_required_arguments() self._check_argument_types() self._check_argument_values() self._check_required_together(required_together) self._check_required_one_of(required_one_of) self._check_required_if(required_if) self._set_defaults(pre=False) if not self.no_log: self._log_invocation() # finally, make sure we're in a sane working dir self._set_cwd() def warn(self, warning): if isinstance(warning, 
string_types): self._warnings.append(warning) self.log('[WARNING] %s' % warning) else: raise TypeError("warn requires a string not a %s" % type(warning)) def deprecate(self, msg, version=None): if isinstance(msg, string_types): self._deprecations.append({ 'msg': msg, 'version': version }) self.log('[DEPRECATION WARNING] %s %s' % (msg, version)) else: raise TypeError("deprecate requires a string not a %s" % type(msg)) def load_file_common_arguments(self, params): ''' many modules deal with files, this encapsulates common options that the file module accepts such that it is directly available to all modules and they can share code. ''' path = params.get('path', params.get('dest', None)) if path is None: return {} else: path = os.path.expanduser(os.path.expandvars(path)) b_path = to_bytes(path, errors='surrogate_or_strict') # if the path is a symlink, and we're following links, get # the target of the link instead for testing if params.get('follow', False) and os.path.islink(b_path): b_path = os.path.realpath(b_path) path = to_native(b_path) mode = params.get('mode', None) owner = params.get('owner', None) group = params.get('group', None) # selinux related options seuser = params.get('seuser', None) serole = params.get('serole', None) setype = params.get('setype', None) selevel = params.get('selevel', None) secontext = [seuser, serole, setype] if self.selinux_mls_enabled(): secontext.append(selevel) default_secontext = self.selinux_default_context(path) for i in range(len(default_secontext)): if i is not None and secontext[i] == '_default': secontext[i] = default_secontext[i] attributes = params.get('attributes', None) return dict( path=path, mode=mode, owner=owner, group=group, seuser=seuser, serole=serole, setype=setype, selevel=selevel, secontext=secontext, attributes=attributes, ) # Detect whether using selinux that is MLS-aware. 
# While this means you can set the level/range with # selinux.lsetfilecon(), it may or may not mean that you # will get the selevel as part of the context returned # by selinux.lgetfilecon(). def selinux_mls_enabled(self): if not HAVE_SELINUX: return False if selinux.is_selinux_mls_enabled() == 1: return True else: return False def selinux_enabled(self): if not HAVE_SELINUX: seenabled = self.get_bin_path('selinuxenabled') if seenabled is not None: (rc,out,err) = self.run_command(seenabled) if rc == 0: self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!") return False if selinux.is_selinux_enabled() == 1: return True else: return False # Determine whether we need a placeholder for selevel/mls def selinux_initial_context(self): context = [None, None, None] if self.selinux_mls_enabled(): context.append(None) return context # If selinux fails to find a default, return an array of None def selinux_default_context(self, path, mode=0): context = self.selinux_initial_context() if not HAVE_SELINUX or not self.selinux_enabled(): return context try: ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode) except OSError: return context if ret[0] == -1: return context # Limit split to 4 because the selevel, the last in the list, # may contain ':' characters context = ret[1].split(':', 3) return context def selinux_context(self, path): context = self.selinux_initial_context() if not HAVE_SELINUX or not self.selinux_enabled(): return context try: ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict')) except OSError: e = get_exception() if e.errno == errno.ENOENT: self.fail_json(path=path, msg='path %s does not exist' % path) else: self.fail_json(path=path, msg='failed to retrieve selinux context') if ret[0] == -1: return context # Limit split to 4 because the selevel, the last in the list, # may contain ':' characters context = ret[1].split(':', 3) return context def 
user_and_group(self, filename): filename = os.path.expanduser(os.path.expandvars(filename)) b_filename = to_bytes(filename, errors='surrogate_or_strict') st = os.lstat(b_filename) uid = st.st_uid gid = st.st_gid return (uid, gid) def find_mount_point(self, path): path = os.path.realpath(os.path.expanduser(os.path.expandvars(path))) while not os.path.ismount(path): path = os.path.dirname(path) return path def is_special_selinux_path(self, path): """ Returns a tuple containing (True, selinux_context) if the given path is on a NFS or other 'special' fs mount point, otherwise the return will be (False, None). """ try: f = open('/proc/mounts', 'r') mount_data = f.readlines() f.close() except: return (False, None) path_mount_point = self.find_mount_point(path) for line in mount_data: (device, mount_point, fstype, options, rest) = line.split(' ', 4) if path_mount_point == mount_point: for fs in self._selinux_special_fs: if fs in fstype: special_context = self.selinux_context(path_mount_point) return (True, special_context) return (False, None) def set_default_selinux_context(self, path, changed): if not HAVE_SELINUX or not self.selinux_enabled(): return changed context = self.selinux_default_context(path) return self.set_context_if_different(path, context, False) def set_context_if_different(self, path, context, changed, diff=None): if not HAVE_SELINUX or not self.selinux_enabled(): return changed cur_context = self.selinux_context(path) new_context = list(cur_context) # Iterate over the current context instead of the # argument context, which may have selevel. 
(is_special_se, sp_context) = self.is_special_selinux_path(path) if is_special_se: new_context = sp_context else: for i in range(len(cur_context)): if len(context) > i: if context[i] is not None and context[i] != cur_context[i]: new_context[i] = context[i] elif context[i] is None: new_context[i] = cur_context[i] if cur_context != new_context: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['secontext'] = cur_context if 'after' not in diff: diff['after'] = {} diff['after']['secontext'] = new_context try: if self.check_mode: return True rc = selinux.lsetfilecon(to_native(path), str(':'.join(new_context))) except OSError: e = get_exception() self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context) if rc != 0: self.fail_json(path=path, msg='set selinux context failed') changed = True return changed def set_owner_if_different(self, path, owner, changed, diff=None): path = os.path.expanduser(os.path.expandvars(path)) b_path = to_bytes(path, errors='surrogate_or_strict') if owner is None: return changed orig_uid, orig_gid = self.user_and_group(path) try: uid = int(owner) except ValueError: try: uid = pwd.getpwnam(owner).pw_uid except KeyError: self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner) if orig_uid != uid: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['owner'] = orig_uid if 'after' not in diff: diff['after'] = {} diff['after']['owner'] = uid if self.check_mode: return True try: os.lchown(b_path, uid, -1) except OSError: self.fail_json(path=path, msg='chown failed') changed = True return changed def set_group_if_different(self, path, group, changed, diff=None): path = os.path.expanduser(os.path.expandvars(path)) b_path = to_bytes(path, errors='surrogate_or_strict') if group is None: return changed orig_uid, orig_gid = self.user_and_group(b_path) try: gid = int(group) except ValueError: 
try: gid = grp.getgrnam(group).gr_gid except KeyError: self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group) if orig_gid != gid: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['group'] = orig_gid if 'after' not in diff: diff['after'] = {} diff['after']['group'] = gid if self.check_mode: return True try: os.lchown(b_path, -1, gid) except OSError: self.fail_json(path=path, msg='chgrp failed') changed = True return changed def set_mode_if_different(self, path, mode, changed, diff=None): b_path = to_bytes(path, errors='surrogate_or_strict') b_path = os.path.expanduser(os.path.expandvars(b_path)) path_stat = os.lstat(b_path) if mode is None: return changed if not isinstance(mode, int): try: mode = int(mode, 8) except Exception: try: mode = self._symbolic_mode_to_octal(path_stat, mode) except Exception: e = get_exception() self.fail_json(path=path, msg="mode must be in octal or symbolic form", details=str(e)) if mode != stat.S_IMODE(mode): # prevent mode from having extra info orbeing invalid long number self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode) prev_mode = stat.S_IMODE(path_stat.st_mode) if prev_mode != mode: if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['mode'] = '0%03o' % prev_mode if 'after' not in diff: diff['after'] = {} diff['after']['mode'] = '0%03o' % mode if self.check_mode: return True # FIXME: comparison against string above will cause this to be executed # every time try: if hasattr(os, 'lchmod'): os.lchmod(b_path, mode) else: if not os.path.islink(b_path): os.chmod(b_path, mode) else: # Attempt to set the perms of the symlink but be # careful not to change the perms of the underlying # file while trying underlying_stat = os.stat(b_path) os.chmod(b_path, mode) new_underlying_stat = os.stat(b_path) if underlying_stat.st_mode != new_underlying_stat.st_mode: os.chmod(b_path, 
stat.S_IMODE(underlying_stat.st_mode)) except OSError: e = get_exception() if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links pass elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links pass else: raise e except Exception: e = get_exception() self.fail_json(path=path, msg='chmod failed', details=str(e)) path_stat = os.lstat(b_path) new_mode = stat.S_IMODE(path_stat.st_mode) if new_mode != prev_mode: changed = True return changed def set_attributes_if_different(self, path, attributes, changed, diff=None): if attributes is None: return changed b_path = to_bytes(path, errors='surrogate_or_strict') b_path = os.path.expanduser(os.path.expandvars(b_path)) existing = self.get_file_attributes(b_path) if existing.get('attr_flags','') != attributes: attrcmd = self.get_bin_path('chattr') if attrcmd: attrcmd = [attrcmd, '=%s' % attributes, b_path] changed = True if diff is not None: if 'before' not in diff: diff['before'] = {} diff['before']['attributes'] = existing.get('attr_flags') if 'after' not in diff: diff['after'] = {} diff['after']['attributes'] = attributes if not self.check_mode: try: rc, out, err = self.run_command(attrcmd) if rc != 0 or err: raise Exception("Error while setting attributes: %s" % (out + err)) except: e = get_exception() self.fail_json(path=path, msg='chattr failed', details=str(e)) return changed def get_file_attributes(self, path): output = {} attrcmd = self.get_bin_path('lsattr', False) if attrcmd: attrcmd = [attrcmd, '-vd', path] try: rc, out, err = self.run_command(attrcmd) if rc == 0: res = out.split(' ')[0:2] output['attr_flags'] = res[1].replace('-','').strip() output['version'] = res[0].strip() output['attributes'] = format_attributes(output['attr_flags']) except: pass return output def _symbolic_mode_to_octal(self, path_stat, symbolic_mode): new_mode = stat.S_IMODE(path_stat.st_mode) mode_re = 
re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$') for mode in symbolic_mode.split(','): match = mode_re.match(mode) if match: users = match.group('users') operator = match.group('operator') perms = match.group('perms') if users == 'a': users = 'ugo' for user in users: mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms) new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode) else: raise ValueError("bad symbolic permission for mode: %s" % mode) return new_mode def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode): if operator == '=': if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX # mask out u, g, or o permissions from current_mode and apply new permissions inverse_mask = mask ^ PERM_BITS new_mode = (current_mode & inverse_mask) | mode_to_apply elif operator == '+': new_mode = current_mode | mode_to_apply elif operator == '-': new_mode = current_mode - (current_mode & mode_to_apply) return new_mode def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms): prev_mode = stat.S_IMODE(path_stat.st_mode) is_directory = stat.S_ISDIR(path_stat.st_mode) has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0 apply_X_permission = is_directory or has_x_permissions # Permission bits constants documented at: # http://docs.python.org/2/library/stat.html#stat.S_ISUID if apply_X_permission: X_perms = { 'u': {'X': stat.S_IXUSR}, 'g': {'X': stat.S_IXGRP}, 'o': {'X': stat.S_IXOTH} } else: X_perms = { 'u': {'X': 0}, 'g': {'X': 0}, 'o': {'X': 0} } user_perms_to_modes = { 'u': { 'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR, 's': stat.S_ISUID, 't': 0, 'u': prev_mode & stat.S_IRWXU, 'g': (prev_mode & stat.S_IRWXG) << 3, 'o': (prev_mode & stat.S_IRWXO) << 6 }, 'g': { 'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP, 's': stat.S_ISGID, 't': 0, 
'u': (prev_mode & stat.S_IRWXU) >> 3, 'g': prev_mode & stat.S_IRWXG, 'o': (prev_mode & stat.S_IRWXO) << 3 }, 'o': { 'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH, 's': 0, 't': stat.S_ISVTX, 'u': (prev_mode & stat.S_IRWXU) >> 6, 'g': (prev_mode & stat.S_IRWXG) >> 3, 'o': prev_mode & stat.S_IRWXO } } # Insert X_perms into user_perms_to_modes for key, value in X_perms.items(): user_perms_to_modes[key].update(value) or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm] return reduce(or_reduce, perms, 0) def set_fs_attributes_if_different(self, file_args, changed, diff=None): # set modes owners and context as needed changed = self.set_context_if_different( file_args['path'], file_args['secontext'], changed, diff ) changed = self.set_owner_if_different( file_args['path'], file_args['owner'], changed, diff ) changed = self.set_group_if_different( file_args['path'], file_args['group'], changed, diff ) changed = self.set_mode_if_different( file_args['path'], file_args['mode'], changed, diff ) changed = self.set_attributes_if_different( file_args['path'], file_args['attributes'], changed, diff ) return changed def set_directory_attributes_if_different(self, file_args, changed, diff=None): return self.set_fs_attributes_if_different(file_args, changed, diff) def set_file_attributes_if_different(self, file_args, changed, diff=None): return self.set_fs_attributes_if_different(file_args, changed, diff) def add_path_info(self, kwargs): ''' for results that are files, supplement the info about the file in the return path with stats about the file path. 
''' path = kwargs.get('path', kwargs.get('dest', None)) if path is None: return kwargs b_path = to_bytes(path, errors='surrogate_or_strict') if os.path.exists(b_path): (uid, gid) = self.user_and_group(path) kwargs['uid'] = uid kwargs['gid'] = gid try: user = pwd.getpwuid(uid)[0] except KeyError: user = str(uid) try: group = grp.getgrgid(gid)[0] except KeyError: group = str(gid) kwargs['owner'] = user kwargs['group'] = group st = os.lstat(b_path) kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE]) # secontext not yet supported if os.path.islink(b_path): kwargs['state'] = 'link' elif os.path.isdir(b_path): kwargs['state'] = 'directory' elif os.stat(b_path).st_nlink > 1: kwargs['state'] = 'hard' else: kwargs['state'] = 'file' if HAVE_SELINUX and self.selinux_enabled(): kwargs['secontext'] = ':'.join(self.selinux_context(path)) kwargs['size'] = st[stat.ST_SIZE] else: kwargs['state'] = 'absent' return kwargs def _check_locale(self): ''' Uses the locale module to test the currently set locale (per the LANG and LC_CTYPE environment settings) ''' try: # setting the locale to '' uses the default locale # as it would be returned by locale.getdefaultlocale() locale.setlocale(locale.LC_ALL, '') except locale.Error: # fallback to the 'C' locale, which may cause unicode # issues but is preferable to simply failing because # of an unknown locale locale.setlocale(locale.LC_ALL, 'C') os.environ['LANG'] = 'C' os.environ['LC_ALL'] = 'C' os.environ['LC_MESSAGES'] = 'C' except Exception: e = get_exception() self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) def _handle_aliases(self): # this uses exceptions as it happens before we can safely call fail_json aliases_results = {} #alias:canon for (k,v) in self.argument_spec.items(): self._legal_inputs.append(k) aliases = v.get('aliases', None) default = v.get('default', None) required = v.get('required', False) if default is not None and required: # not alias specific but this is a 
good place to check this raise Exception("internal error: required and default are mutually exclusive for %s" % k) if aliases is None: continue if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)): raise Exception('internal error: aliases must be a list or tuple') for alias in aliases: self._legal_inputs.append(alias) aliases_results[alias] = k if alias in self.params: self.params[k] = self.params[alias] return aliases_results def _check_arguments(self, check_invalid_arguments): self._syslog_facility = 'LOG_USER' unsupported_parameters = set() for (k,v) in list(self.params.items()): if k == '_ansible_check_mode' and v: self.check_mode = True elif k == '_ansible_no_log': self.no_log = self.boolean(v) elif k == '_ansible_debug': self._debug = self.boolean(v) elif k == '_ansible_diff': self._diff = self.boolean(v) elif k == '_ansible_verbosity': self._verbosity = v elif k == '_ansible_selinux_special_fs': self._selinux_special_fs = v elif k == '_ansible_syslog_facility': self._syslog_facility = v elif k == '_ansible_version': self.ansible_version = v elif k == '_ansible_module_name': self._name = v elif k == '_ansible_socket': self._socket_path = v elif check_invalid_arguments and k not in self._legal_inputs: unsupported_parameters.add(k) #clean up internal params: if k.startswith('_ansible_'): del self.params[k] if unsupported_parameters: self.fail_json(msg="Unsupported parameters for (%s) module: %s. 
Supported parameters include: %s" % (self._name, ','.join(sorted(list(unsupported_parameters))), ','.join(sorted(self.argument_spec.keys())))) if self.check_mode and not self.supports_check_mode: self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name) def _count_terms(self, check): count = 0 for term in check: if term in self.params: count += 1 return count def _check_mutually_exclusive(self, spec): if spec is None: return for check in spec: count = self._count_terms(check) if count > 1: self.fail_json(msg="parameters are mutually exclusive: %s" % (check,)) def _check_required_one_of(self, spec): if spec is None: return for check in spec: count = self._count_terms(check) if count == 0: self.fail_json(msg="one of the following is required: %s" % ','.join(check)) def _check_required_together(self, spec): if spec is None: return for check in spec: counts = [ self._count_terms([field]) for field in check ] non_zero = [ c for c in counts if c > 0 ] if len(non_zero) > 0: if 0 in counts: self.fail_json(msg="parameters are required together: %s" % (check,)) def _check_required_arguments(self): ''' ensure all required arguments are present ''' missing = [] for (k,v) in self.argument_spec.items(): required = v.get('required', False) if required and k not in self.params: missing.append(k) if len(missing) > 0: self.fail_json(msg="missing required arguments: %s" % ",".join(missing)) def _check_required_if(self, spec): ''' ensure that parameters which conditionally required are present ''' if spec is None: return for sp in spec: missing = [] max_missing_count = 0 is_one_of = False if len(sp) == 4: key, val, requirements, is_one_of = sp else: key, val, requirements = sp # is_one_of is True at least one requirement should be # present, else all requirements should be present. 
if is_one_of: max_missing_count = len(requirements) if key in self.params and self.params[key] == val: for check in requirements: count = self._count_terms((check,)) if count == 0: missing.append(check) if len(missing) and len(missing) >= max_missing_count: self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing))) def _check_argument_values(self): ''' ensure all arguments have the requested values, and there are no stray arguments ''' for (k,v) in self.argument_spec.items(): choices = v.get('choices',None) if choices is None: continue if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)): if k in self.params: if self.params[k] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. lowered_choices = None if self.params[k] == 'False': lowered_choices = _lenient_lowercase(choices) FALSEY = frozenset(BOOLEANS_FALSE) overlap = FALSEY.intersection(choices) if len(overlap) == 1: # Extract from a set (self.params[k],) = overlap if self.params[k] == 'True': if lowered_choices is None: lowered_choices = _lenient_lowercase(choices) TRUTHY = frozenset(BOOLEANS_TRUE) overlap = TRUTHY.intersection(choices) if len(overlap) == 1: (self.params[k],) = overlap if self.params[k] not in choices: choices_str=",".join([to_native(c) for c in choices]) msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k]) self.fail_json(msg=msg) else: self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices)) def safe_eval(self, value, locals=None, include_exceptions=False): # do not allow method calls to modules if not isinstance(value, string_types): # already templated to a datavaluestructure, perhaps? 
if include_exceptions: return (value, None) return value if re.search(r'\w\.\w+\(', value): if include_exceptions: return (value, None) return value # do not allow imports if re.search(r'import \w+', value): if include_exceptions: return (value, None) return value try: result = literal_eval(value) if include_exceptions: return (result, None) else: return result except Exception: e = get_exception() if include_exceptions: return (value, e) return value def _check_type_str(self, value): if isinstance(value, string_types): return value # Note: This could throw a unicode error if value's __str__() method # returns non-ascii. Have to port utils.to_bytes() if that happens return str(value) def _check_type_list(self, value): if isinstance(value, list): return value if isinstance(value, string_types): return value.split(",") elif isinstance(value, int) or isinstance(value, float): return [ str(value) ] raise TypeError('%s cannot be converted to a list' % type(value)) def _check_type_dict(self, value): if isinstance(value, dict): return value if isinstance(value, string_types): if value.startswith("{"): try: return json.loads(value) except: (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) if exc is not None: raise TypeError('unable to evaluate string as dictionary') return result elif '=' in value: fields = [] field_buffer = [] in_quote = False in_escape = False for c in value.strip(): if in_escape: field_buffer.append(c) in_escape = False elif c == '\\': in_escape = True elif not in_quote and c in ('\'', '"'): in_quote = c elif in_quote and in_quote == c: in_quote = False elif not in_quote and c in (',', ' '): field = ''.join(field_buffer) if field: fields.append(field) field_buffer = [] else: field_buffer.append(c) field = ''.join(field_buffer) if field: fields.append(field) return dict(x.split("=", 1) for x in fields) else: raise TypeError("dictionary requested, could not parse JSON or key=value") raise TypeError('%s cannot be converted to a dict' % 
type(value)) def _check_type_bool(self, value): if isinstance(value, bool): return value if isinstance(value, string_types) or isinstance(value, int): return self.boolean(value) raise TypeError('%s cannot be converted to a bool' % type(value)) def _check_type_int(self, value): if isinstance(value, int): return value if isinstance(value, string_types): return int(value) raise TypeError('%s cannot be converted to an int' % type(value)) def _check_type_float(self, value): if isinstance(value, float): return value if isinstance(value, (binary_type, text_type, int)): return float(value) raise TypeError('%s cannot be converted to a float' % type(value)) def _check_type_path(self, value): value = self._check_type_str(value) return os.path.expanduser(os.path.expandvars(value)) def _check_type_jsonarg(self, value): # Return a jsonified string. Sometimes the controller turns a json # string into a dict/list so transform it back into json here if isinstance(value, (text_type, binary_type)): return value.strip() else: if isinstance(value, (list, tuple, dict)): return json.dumps(value) raise TypeError('%s cannot be converted to a json string' % type(value)) def _check_type_raw(self, value): return value def _check_type_bytes(self, value): try: self.human_to_bytes(value) except ValueError: raise TypeError('%s cannot be converted to a Byte value' % type(value)) def _check_type_bits(self, value): try: self.human_to_bytes(value, isbits=True) except ValueError: raise TypeError('%s cannot be converted to a Bit value' % type(value)) def _check_argument_types(self): ''' ensure all arguments have the requested type ''' for (k, v) in self.argument_spec.items(): wanted = v.get('type', None) if k not in self.params: continue if wanted is None: # Mostly we want to default to str. 
# For values set to None explicitly, return None instead as # that allows a user to unset a parameter if self.params[k] is None: continue wanted = 'str' value = self.params[k] if value is None: continue try: type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted] except KeyError: self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) try: self.params[k] = type_checker(value) except (TypeError, ValueError): e = get_exception() self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" % (k, type(value), wanted, e)) def _set_defaults(self, pre=True): for (k,v) in self.argument_spec.items(): default = v.get('default', None) if pre == True: # this prevents setting defaults on required items if default is not None and k not in self.params: self.params[k] = default else: # make sure things without a default still get set None if k not in self.params: self.params[k] = default def _set_fallbacks(self): for k,v in self.argument_spec.items(): fallback = v.get('fallback', (None,)) fallback_strategy = fallback[0] fallback_args = [] fallback_kwargs = {} if k not in self.params and fallback_strategy is not None: for item in fallback[1:]: if isinstance(item, dict): fallback_kwargs = item else: fallback_args = item try: self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs) except AnsibleFallbackNotFound: continue def _load_params(self): ''' read the input and set the params attribute. This method is for backwards compatibility. The guts of the function were moved out in 2.1 so that custom modules could read the parameters. 
''' # debug overrides to read args from file or cmdline self.params = _load_params() def _log_to_syslog(self, msg): if HAS_SYSLOG: module = 'ansible-%s' % self._name facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER) syslog.openlog(str(module), 0, facility) syslog.syslog(syslog.LOG_INFO, msg) def debug(self, msg): if self._debug: self.log('[debug] %s' % msg) def log(self, msg, log_args=None): if not self.no_log: if log_args is None: log_args = dict() module = 'ansible-%s' % self._name if isinstance(module, binary_type): module = module.decode('utf-8', 'replace') # 6655 - allow for accented characters if not isinstance(msg, (binary_type, text_type)): raise TypeError("msg should be a string (got %s)" % type(msg)) # We want journal to always take text type # syslog takes bytes on py2, text type on py3 if isinstance(msg, binary_type): journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values) else: # TODO: surrogateescape is a danger here on Py3 journal_msg = remove_values(msg, self.no_log_values) if PY3: syslog_msg = journal_msg else: syslog_msg = journal_msg.encode('utf-8', 'replace') if has_journal: journal_args = [("MODULE", os.path.basename(__file__))] for arg in log_args: journal_args.append((arg.upper(), str(log_args[arg]))) try: journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args)) except IOError: # fall back to syslog since logging to journal failed self._log_to_syslog(syslog_msg) else: self._log_to_syslog(syslog_msg) def _log_invocation(self): ''' log that ansible ran the module ''' # TODO: generalize a separate log function and make log_invocation use it # Sanitize possible password argument when logging. 
log_args = dict() for param in self.params: canon = self.aliases.get(param, param) arg_opts = self.argument_spec.get(canon, {}) no_log = arg_opts.get('no_log', False) arg_type = arg_opts.get('type', 'str') if self.boolean(no_log): log_args[param] = 'NOT_LOGGING_PARAMETER' # try to capture all passwords/passphrase named fields elif arg_type != 'bool' and PASSWORD_MATCH.search(param): log_args[param] = 'NOT_LOGGING_PASSWORD' self.warn('Module did not set no_log for %s' % param) else: param_val = self.params[param] if not isinstance(param_val, (text_type, binary_type)): param_val = str(param_val) elif isinstance(param_val, text_type): param_val = param_val.encode('utf-8') log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values) msg = [] for arg in log_args: arg_val = log_args[arg] if not isinstance(arg_val, (text_type, binary_type)): arg_val = str(arg_val) elif isinstance(arg_val, text_type): arg_val = arg_val.encode('utf-8') msg.append('%s=%s' % (arg, arg_val)) if msg: msg = 'Invoked with %s' % ' '.join(msg) else: msg = 'Invoked' self.log(msg, log_args=log_args) def _set_cwd(self): try: cwd = os.getcwd() if not os.access(cwd, os.F_OK|os.R_OK): raise return cwd except: # we don't have access to the cwd, probably because of sudo. # Try and move to a neutral location to prevent errors for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]: try: if os.access(cwd, os.F_OK|os.R_OK): os.chdir(cwd) return cwd except: pass # we won't error here, as it may *not* be a problem, # and we don't want to break modules unnecessarily return None def get_bin_path(self, arg, required=False, opt_dirs=[]): ''' find system executable in PATH. 
Optional arguments: - required: if executable is not found and required is true, fail_json - opt_dirs: optional list of directories to search in addition to PATH if found return full path; otherwise return None ''' sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] paths = [] for d in opt_dirs: if d is not None and os.path.exists(d): paths.append(d) paths += os.environ.get('PATH', '').split(os.pathsep) bin_path = None # mangle PATH to include /sbin dirs for p in sbin_paths: if p not in paths and os.path.exists(p): paths.append(p) for d in paths: if not d: continue path = os.path.join(d, arg) if os.path.exists(path) and is_executable(path): bin_path = path break if required and bin_path is None: self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths))) return bin_path def boolean(self, arg): ''' return a bool for the arg ''' if arg is None or isinstance(arg, bool): return arg if isinstance(arg, string_types): arg = arg.lower() if arg in BOOLEANS_TRUE: return True elif arg in BOOLEANS_FALSE: return False else: self.fail_json(msg='%s is not a valid boolean. Valid booleans include: %s' % (to_text(arg), ','.join(['%s' % x for x in BOOLEANS]))) def jsonify(self, data): for encoding in ("utf-8", "latin-1"): try: return json.dumps(data, encoding=encoding) # Old systems using old simplejson module does not support encoding keyword. 
except TypeError: try: new_data = json_dict_bytes_to_unicode(data, encoding=encoding) except UnicodeDecodeError: continue return json.dumps(new_data) except UnicodeDecodeError: continue self.fail_json(msg='Invalid unicode encoding encountered') def from_json(self, data): return json.loads(data) def add_cleanup_file(self, path): if path not in self.cleanup_files: self.cleanup_files.append(path) def do_cleanup_files(self): for path in self.cleanup_files: self.cleanup(path) def _return_formatted(self, kwargs): self.add_path_info(kwargs) if 'invocation' not in kwargs: kwargs['invocation'] = {'module_args': self.params} if 'warnings' in kwargs: if isinstance(kwargs['warnings'], list): for w in kwargs['warnings']: self.warn(w) else: self.warn(kwargs['warnings']) if self._warnings: kwargs['warnings'] = self._warnings if 'deprecations' in kwargs: if isinstance(kwargs['deprecations'], list): for d in kwargs['deprecations']: if isinstance(d, SEQUENCETYPE) and len(d) == 2: self.deprecate(d[0], version=d[1]) else: self.deprecate(d) else: self.deprecate(d) if self._deprecations: kwargs['deprecations'] = self._deprecations kwargs = remove_values(kwargs, self.no_log_values) print('\n%s' % self.jsonify(kwargs)) def exit_json(self, **kwargs): ''' return from the module, without error ''' if not 'changed' in kwargs: kwargs['changed'] = False self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(0) def fail_json(self, **kwargs): ''' return from the module, with an error message ''' assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(1) def fail_on_missing_params(self, required_params=None): ''' This is for checking for required params when we can not check via argspec because we need more information than is simply given in the argspec. 
''' if not required_params: return missing_params = [] for required_param in required_params: if not self.params.get(required_param): missing_params.append(required_param) if missing_params: self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) def digest_from_file(self, filename, algorithm): ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. ''' if not os.path.exists(filename): return None if os.path.isdir(filename): self.fail_json(msg="attempted to take checksum of directory: %s" % filename) # preserve old behaviour where the third parameter was a hash algorithm object if hasattr(algorithm, 'hexdigest'): digest_method = algorithm else: try: digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]() except KeyError: self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" % (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS))) blocksize = 64 * 1024 infile = open(filename, 'rb') block = infile.read(blocksize) while block: digest_method.update(block) block = infile.read(blocksize) infile.close() return digest_method.hexdigest() def md5(self, filename): ''' Return MD5 hex digest of local file using digest_from_file(). Do not use this function unless you have no other choice for: 1) Optional backwards compatibility 2) Compatibility with a third party protocol This function will not work on systems complying with FIPS-140-2. Most uses of this function can use the module.sha1 function instead. ''' if 'md5' not in AVAILABLE_HASH_ALGORITHMS: raise ValueError('MD5 not available. Possibly running in FIPS mode') return self.digest_from_file(filename, 'md5') def sha1(self, filename): ''' Return SHA1 hex digest of local file using digest_from_file(). ''' return self.digest_from_file(filename, 'sha1') def sha256(self, filename): ''' Return SHA-256 hex digest of local file using digest_from_file(). 
''' return self.digest_from_file(filename, 'sha256') def backup_local(self, fn): '''make a date-marked backup of the specified file, return True or False on success or failure''' backupdest = '' if os.path.exists(fn): # backups named basename-YYYY-MM-DD@HH:MM:SS~ ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time())) backupdest = '%s.%s.%s' % (fn, os.getpid(), ext) try: shutil.copy2(fn, backupdest) except (shutil.Error, IOError): e = get_exception() self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e)) return backupdest def cleanup(self, tmpfile): if os.path.exists(tmpfile): try: os.unlink(tmpfile) except OSError: e = get_exception() sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e)) def atomic_move(self, src, dest, unsafe_writes=False): '''atomically move src to dest, copying attributes from dest, returns true on success it uses os.rename to ensure this as it is an atomic operation, rest of the function is to work around limitations, corner cases and ensure selinux context is saved if possible''' context = None dest_stat = None b_src = to_bytes(src, errors='surrogate_or_strict') b_dest = to_bytes(dest, errors='surrogate_or_strict') if os.path.exists(b_dest): try: dest_stat = os.stat(b_dest) # copy mode and ownership os.chmod(b_src, dest_stat.st_mode & PERM_BITS) os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid) # try to copy flags if possible if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'): try: os.chflags(b_src, dest_stat.st_flags) except OSError: e = get_exception() for err in 'EOPNOTSUPP', 'ENOTSUP': if hasattr(errno, err) and e.errno == getattr(errno, err): break else: raise except OSError: e = get_exception() if e.errno != errno.EPERM: raise if self.selinux_enabled(): context = self.selinux_context(dest) else: if self.selinux_enabled(): context = self.selinux_default_context(dest) creating = not os.path.exists(b_dest) try: # Optimistically try a rename, solves some corner cases and can avoid 
useless work, throws exception if not atomic. os.rename(b_src, b_dest) except (IOError, OSError): e = get_exception() if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]: # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc()) else: b_dest_dir = os.path.dirname(b_dest) # Use bytes here. In the shippable CI, this fails with # a UnicodeError with surrogateescape'd strings for an unknown # reason (doesn't happen in a local Ubuntu16.04 VM) native_dest_dir = b_dest_dir native_suffix = os.path.basename(b_dest) native_prefix = b('.ansible_tmp') try: tmp_dest_fd, tmp_dest_name = tempfile.mkstemp( prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix) except (OSError, IOError): e = get_exception() self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e)) except TypeError: # We expect that this is happening because python3.4.x and # below can't handle byte strings in mkstemp(). Traceback # would end in something like: # file = _os.path.join(dir, pre + name + suf) # TypeError: can't concat bytes to str self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. 
Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc()) b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict') try: try: # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host) os.close(tmp_dest_fd) # leaves tmp file behind when sudo and not root try: shutil.move(b_src, b_tmp_dest_name) except OSError: # cleanup will happen by 'rm' of tempdir # copy2 will preserve some metadata shutil.copy2(b_src, b_tmp_dest_name) if self.selinux_enabled(): self.set_context_if_different( b_tmp_dest_name, context, False) try: tmp_stat = os.stat(b_tmp_dest_name) if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid) except OSError: e = get_exception() if e.errno != errno.EPERM: raise try: os.rename(b_tmp_dest_name, b_dest) except (shutil.Error, OSError, IOError): e = get_exception() if unsafe_writes and e.errno == errno.EBUSY: self._unsafe_writes(b_tmp_dest_name, b_dest) else: self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc()) except (shutil.Error, OSError, IOError): e = get_exception() self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc()) finally: self.cleanup(b_tmp_dest_name) if creating: # make sure the file has the correct permissions # based on the current value of umask umask = os.umask(0) os.umask(umask) os.chmod(b_dest, DEFAULT_PERM & ~umask) try: os.chown(b_dest, os.geteuid(), os.getegid()) except OSError: # We're okay with trying our best here. If the user is not # root (or old Unices) they won't be able to chown. 
pass if self.selinux_enabled(): # rename might not preserve context self.set_context_if_different(dest, context, False) def _unsafe_writes(self, src, dest): # sadly there are some situations where we cannot ensure atomicity, but only if # the user insists and we get the appropriate error we update the file unsafely try: try: out_dest = open(dest, 'wb') in_src = open(src, 'rb') shutil.copyfileobj(in_src, out_dest) finally: # assuring closed files in 2.4 compatible way if out_dest: out_dest.close() if in_src: in_src.close() except (shutil.Error, OSError, IOError): e = get_exception() self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e), exception=traceback.format_exc()) def _read_from_pipes(self, rpipes, rfds, file_descriptor): data = b('') if file_descriptor in rfds: data = os.read(file_descriptor.fileno(), 9000) if data == b(''): rpipes.remove(file_descriptor) return data def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'): ''' Execute a command, returns rc, stdout, and stderr. :arg args: is the command to run * If args is a list, the command will be run with shell=False. * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False * If args is a string and use_unsafe_shell=True it runs with shell=True. :kw check_rc: Whether to call fail_json in case of non zero RC. Default False :kw close_fds: See documentation for subprocess.Popen(). Default True :kw executable: See documentation for subprocess.Popen(). Default None :kw data: If given, information to write to the stdin of the command :kw binary_data: If False, append a newline to the data. Default False :kw path_prefix: If given, additional path to find the command in. 
This adds to the PATH environment vairable so helper commands in the same directory can also be found :kw cwd: If given, working directory to run the command inside :kw use_unsafe_shell: See `args` parameter. Default False :kw prompt_regex: Regex string (not a compiled regex) which can be used to detect prompts in the stdout which would otherwise cause the execution to hang (especially if no input data is specified) :kw environ_update: dictionary to *update* os.environ with :kw umask: Umask to be used when running the command. Default None :kw encoding: Since we return native strings, on python3 we need to know the encoding to use to transform from bytes to text. If you want to always get bytes back, use encoding=None. The default is "utf-8". This does not affect transformation of strings given as args. :kw errors: Since we return native strings, on python3 we need to transform stdout and stderr from bytes to text. If the bytes are undecodable in the ``encoding`` specified, then use this error handler to deal with them. The default is ``surrogate_or_strict`` which means that the bytes will be decoded using the surrogateescape error handler if available (available on all python3 versions we support) otherwise a UnicodeError traceback will be raised. This does not affect transformations of strings given as args. :returns: A 3-tuple of return code (integer), stdout (native string), and stderr (native string). On python2, stdout and stderr are both byte strings. On python3, stdout and stderr are text strings converted according to the encoding and errors parameters. If you want byte strings on python3, use encoding=None to turn decoding to text off. 
''' shell = False if isinstance(args, list): if use_unsafe_shell: args = " ".join([pipes.quote(x) for x in args]) shell = True elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell: shell = True elif isinstance(args, (binary_type, text_type)): # On python2.6 and below, shlex has problems with text type # On python3, shlex needs a text type. if PY2: args = to_bytes(args, errors='surrogate_or_strict') elif PY3: args = to_text(args, errors='surrogateescape') args = shlex.split(args) else: msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) prompt_re = None if prompt_regex: if isinstance(prompt_regex, text_type): if PY3: prompt_regex = to_bytes(prompt_regex, errors='surrogateescape') elif PY2: prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict') try: prompt_re = re.compile(prompt_regex, re.MULTILINE) except re.error: self.fail_json(msg="invalid prompt regular expression given to run_command") # expand things like $HOME and ~ if not shell: args = [ os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None ] rc = 0 msg = None st_in = None # Manipulate the environ we'll send to the new process old_env_vals = {} # We can set this from both an attribute and per call for key, val in self.run_command_environ_update.items(): old_env_vals[key] = os.environ.get(key, None) os.environ[key] = val if environ_update: for key, val in environ_update.items(): old_env_vals[key] = os.environ.get(key, None) os.environ[key] = val if path_prefix: old_env_vals['PATH'] = os.environ['PATH'] os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH']) # If using test-module and explode, the remote lib path will resemble ... # /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py # If using ansible or ansible-playbook with a remote system ... 
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py # Clean out python paths set by ansiballz if 'PYTHONPATH' in os.environ: pypaths = os.environ['PYTHONPATH'].split(':') pypaths = [x for x in pypaths \ if not x.endswith('/ansible_modlib.zip') \ and not x.endswith('/debug_dir')] os.environ['PYTHONPATH'] = ':'.join(pypaths) if not os.environ['PYTHONPATH']: del os.environ['PYTHONPATH'] # create a printable version of the command for use # in reporting later, which strips out things like # passwords from the args list to_clean_args = args if PY2: if isinstance(args, text_type): to_clean_args = to_bytes(args) else: if isinstance(args, binary_type): to_clean_args = to_text(args) if isinstance(args, (text_type, binary_type)): to_clean_args = shlex.split(to_clean_args) clean_args = [] is_passwd = False for arg in to_clean_args: if is_passwd: is_passwd = False clean_args.append('********') continue if PASSWD_ARG_RE.match(arg): sep_idx = arg.find('=') if sep_idx > -1: clean_args.append('%s=********' % arg[:sep_idx]) continue else: is_passwd = True arg = heuristic_log_sanitize(arg, self.no_log_values) clean_args.append(arg) clean_args = ' '.join(pipes.quote(arg) for arg in clean_args) if data: st_in = subprocess.PIPE kwargs = dict( executable=executable, shell=shell, close_fds=close_fds, stdin=st_in, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) # store the pwd prev_dir = os.getcwd() # make sure we're in the right working directory if cwd and os.path.isdir(cwd): cwd = os.path.abspath(os.path.expanduser(cwd)) kwargs['cwd'] = cwd try: os.chdir(cwd) except (OSError, IOError): e = get_exception() self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e))) old_umask = None if umask: old_umask = os.umask(umask) try: if self._debug: self.log('Executing: ' + clean_args) cmd = subprocess.Popen(args, **kwargs) # the communication logic here is essentially taken from that # of the _communicate() function in ssh.py stdout = b('') stderr = b('') rpipes 
= [cmd.stdout, cmd.stderr] if data: if not binary_data: data += '\n' if isinstance(data, text_type): data = to_bytes(data) cmd.stdin.write(data) cmd.stdin.close() while True: rfds, wfds, efds = select.select(rpipes, [], rpipes, 1) stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout) stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr) # if we're checking for prompts, do it now if prompt_re: if prompt_re.search(stdout) and not data: if encoding: stdout = to_native(stdout, encoding=encoding, errors=errors) else: stdout = stdout return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated if (not rpipes or not rfds) and cmd.poll() is not None: break # No pipes are left to read but process is not yet terminated # Only then it is safe to wait for the process to be finished # NOTE: Actually cmd.poll() is always None here if rpipes is empty elif not rpipes and cmd.poll() is None: cmd.wait() # The process is terminated. Since no pipes to read from are # left, there is no need to call select() again. 
break cmd.stdout.close() cmd.stderr.close() rc = cmd.returncode except (OSError, IOError): e = get_exception() self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e))) self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args) except Exception: e = get_exception() self.log("Error Executing CMD:%s Exception:%s" % (clean_args,to_native(traceback.format_exc()))) self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args) # Restore env settings for key, val in old_env_vals.items(): if val is None: del os.environ[key] else: os.environ[key] = val if old_umask: os.umask(old_umask) if rc != 0 and check_rc: msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values) self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg) # reset the pwd os.chdir(prev_dir) if encoding is not None: return (rc, to_native(stdout, encoding=encoding, errors=errors), to_native(stderr, encoding=encoding, errors=errors)) return (rc, stdout, stderr) def append_to_file(self, filename, str): filename = os.path.expandvars(os.path.expanduser(filename)) fh = open(filename, 'a') fh.write(str) fh.close() def bytes_to_human(self, size): return bytes_to_human(size) # for backwards compatibility pretty_bytes = bytes_to_human def human_to_bytes(self, number, isbits=False): return human_to_bytes(number, isbits) # # Backwards compat # # In 2.0, moved from inside the module to the toplevel is_executable = is_executable def get_module_path(): return os.path.dirname(os.path.realpath(__file__))
alexanderturner/ansible
lib/ansible/module_utils/basic.py
Python
gpl-3.0
98,799
[ "VisIt" ]
a5bdc87705e568bdd11360fcac89b092fc91c335a51dd17d6aaee90fd0f5b8d6
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Implement AST vistor.""" from invenio_query_parser.ast import AndOp, DoubleQuotedValue, EmptyQuery, \ Keyword, KeywordOp, NotOp, OrOp, RangeOp, RegexValue, SingleQuotedValue, \ Value, ValueQuery from invenio_query_parser.visitor import make_visitor class FacetsVisitor(object): """Implement visitor to extract all facets filters.""" visitor = make_visitor() @staticmethod def jsonable(parsedFacets): """Convert a visited query result to a structure which can be jsonified. :param parsedFacets: a visited query result. """ result = {} # sets cannot be converted to json. We need to convert them to lists. for facet_name in parsedFacets: result[facet_name] = { 'inc': list(parsedFacets[facet_name]['inc']), 'exc': list(parsedFacets[facet_name]['exc']), } return result # pylint: disable=W0613,E0102,F999,D102 def _merge_facets(self, left, right): """merge faceting for an AND or OR operator. 
:param left: left child node faceting :param right: right child node faceting """ for k in right: if k in left: inc = left[k]['inc'].union(right[k]['inc']) exc = left[k]['exc'].union(right[k]['exc']) # Don't mark as included or excluded if only partially # included/excluded left[k] = { 'inc': inc.difference(exc), 'exc': exc.difference(inc), } else: left[k] = right[k] return left def _invert_facets(self, facets): """invert facet filters included <-> excluded. :param facets: facet filters """ for k in facets: facets[k] = { 'inc': facets[k]['exc'], 'exc': facets[k]['inc'], } return facets @visitor(AndOp) def visit(self, node, left, right): return self._merge_facets(left, right) @visitor(OrOp) def visit(self, node, left, right): return self._merge_facets(left, right) @visitor(NotOp) def visit(self, node, op): return self._invert_facets(op) @visitor(KeywordOp) def visit(self, node, left, right): return { node.left.value: { 'inc': set([node.right.value]), 'exc': set() } } @visitor(ValueQuery) def visit(self, node, op): return {} @visitor(Keyword) def visit(self, node): return {} @visitor(Value) def visit(self, node): return {} @visitor(SingleQuotedValue) def visit(self, node): return {} @visitor(DoubleQuotedValue) def visit(self, node): return {} @visitor(RegexValue) def visit(self, node): return {} @visitor(RangeOp) def visit(self, node, left, right): return {} @visitor(EmptyQuery) def visit(self, node): return {} # pylint: enable=W0612,E0102,F999,D102
drjova/invenio-search
invenio_search/walkers/facets.py
Python
gpl-2.0
3,896
[ "VisIt" ]
682e00ffb5c00100871ac70a7a04163e0d097ed84e98f5fe1f4c4601b69758f1
from __future__ import absolute_import from __future__ import division from __future__ import print_function from DIRAC import S_OK, S_ERROR, gLogger class FilterExecutor(object): ALLKW = "all" def __init__(self): self.__filters = {} self.__globalFilters = [] def applyFilters(self, iD, credDict, condDict, groupingList): filters2Apply = list(self.__globalFilters) if iD in self.__filters: filters2Apply.extend(self.__filters[iD]) for myFilter in filters2Apply: try: gLogger.info("Applying filter %s for %s" % (myFilter.__name__, iD)) retVal = myFilter(credDict, condDict, groupingList) if not retVal['OK']: gLogger.info("Filter %s for %s failed: %s" % (myFilter.__name__, iD, retVal['Message'])) return retVal except Exception: gLogger.exception("Exception while applying filter", "%s for %s" % (myFilter.__name__, iD)) return S_ERROR("Exception while applying filters") return S_OK() def addFilter(self, iD, myFilter): if iD not in self.__filters: self.__filters[iD] = [] if isinstance(myFilter, (list, tuple)): self.__filters[iD].extend(myFilter) else: self.__filters[iD].append(myFilter) def addGlobalFilter(self, myFilter): if isinstance(myFilter, (list, tuple)): self.__globalFilters.extend(myFilter) else: self.__globalFilters.append(myFilter)
yujikato/DIRAC
src/DIRAC/AccountingSystem/private/Policies/FilterExecutor.py
Python
gpl-3.0
1,421
[ "DIRAC" ]
ee664f4019b8ca22f580d6103db640ac1107e250bd076b1f689bc11d97a2a110
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for testing `LinearOperator` and sub-classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.platform import test @six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init class LinearOperatorDerivedClassTest(test.TestCase): """Tests for derived classes. Subclasses should implement every abstractmethod, and this will enable all test methods to work. """ # Absolute/relative tolerance for tests. 
_atol = { dtypes.float16: 1e-3, dtypes.float32: 1e-6, dtypes.float64: 1e-12, dtypes.complex64: 1e-6, dtypes.complex128: 1e-12 } _rtol = { dtypes.float16: 1e-3, dtypes.float32: 1e-6, dtypes.float64: 1e-12, dtypes.complex64: 1e-6, dtypes.complex128: 1e-12 } def assertAC(self, x, y): """Derived classes can set _atol, _rtol to get different tolerance.""" dtype = dtypes.as_dtype(x.dtype) atol = self._atol[dtype] rtol = self._rtol[dtype] self.assertAllClose(x, y, atol=atol, rtol=rtol) @property def _adjoint_options(self): return [False, True] @property def _adjoint_arg_options(self): return [False, True] @property def _dtypes_to_test(self): # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit. return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] @property def _use_placeholder_options(self): return [False, True] @abc.abstractproperty def _shapes_to_test(self): """Returns list of tuples, each is one shape that will be tested.""" raise NotImplementedError("shapes_to_test has not been implemented.") @abc.abstractmethod def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder): """Build a batch matrix and an Operator that should have similar behavior. Every operator acts like a (batch) matrix. This method returns both together, and is used by tests. Args: shape: List-like of Python integers giving full shape of operator. dtype: Numpy dtype. Data type of returned array/operator. use_placeholder: Python bool. If True, initialize the operator with a placeholder of undefined shape and correct dtype. Returns: operator: `LinearOperator` subclass instance. mat: `Tensor` representing operator. feed_dict: Dictionary. If placholder is True, this must contains everything needed to be fed to sess.run calls at runtime to make the operator work. """ # Create a matrix as a numpy array with desired shape/dtype. # Create a LinearOperator that should have the same behavior as the matrix. 
raise NotImplementedError("Not implemented yet.") @abc.abstractmethod def _make_rhs(self, operator, adjoint): """Make a rhs appropriate for calling operator.solve(rhs). Args: operator: A `LinearOperator` adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the adjoint operator. Returns: A `Tensor` """ raise NotImplementedError("_make_rhs is not defined.") @abc.abstractmethod def _make_x(self, operator, adjoint): """Make an 'x' appropriate for calling operator.matmul(x). Args: operator: A `LinearOperator` adjoint: Python `bool`. If `True`, we are making an 'x' value for the adjoint operator. Returns: A `Tensor` """ raise NotImplementedError("_make_x is not defined.") @property def _tests_to_skip(self): """List of test names to skip.""" # Subclasses should over-ride if they want to skip some tests. # To skip "test_foo", add "foo" to this list. return [] def _skip_if_tests_to_skip_contains(self, test_name): """If self._tests_to_skip contains test_name, raise SkipTest exception. See tests below for usage. Args: test_name: String name corresponding to a test. Raises: SkipTest Exception, if test_name is in self._tests_to_skip. 
""" if test_name in self._tests_to_skip: self.skipTest("%s skipped because it was added to self._tests_to_skip.") def test_to_dense(self): self._skip_if_tests_to_skip_contains("to_dense") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_dense = operator.to_dense() if not use_placeholder: self.assertAllEqual(shape, op_dense.get_shape()) op_dense_v, mat_v = sess.run([op_dense, mat], feed_dict=feed_dict) self.assertAC(op_dense_v, mat_v) def test_det(self): self._skip_if_tests_to_skip_contains("det") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_det = operator.determinant() if not use_placeholder: self.assertAllEqual(shape[:-2], op_det.get_shape()) op_det_v, mat_det_v = sess.run( [op_det, linalg_ops.matrix_determinant(mat)], feed_dict=feed_dict) self.assertAC(op_det_v, mat_det_v) def test_log_abs_det(self): self._skip_if_tests_to_skip_contains("log_abs_det") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_log_abs_det = operator.log_abs_determinant() _, mat_log_abs_det = linalg.slogdet(mat) if not use_placeholder: self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape()) op_log_abs_det_v, mat_log_abs_det_v = 
sess.run( [op_log_abs_det, mat_log_abs_det], feed_dict=feed_dict) self.assertAC(op_log_abs_det_v, mat_log_abs_det_v) def test_matmul(self): self._skip_if_tests_to_skip_contains("matmul") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: for adjoint in self._adjoint_options: for adjoint_arg in self._adjoint_arg_options: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) x = self._make_x(operator, adjoint=adjoint) # If adjoint_arg, compute A X^H^H = A X. if adjoint_arg: op_matmul = operator.matmul( linalg.adjoint(x), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_matmul = operator.matmul(x, adjoint=adjoint) mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint) if not use_placeholder: self.assertAllEqual(op_matmul.get_shape(), mat_matmul.get_shape()) op_matmul_v, mat_matmul_v = sess.run( [op_matmul, mat_matmul], feed_dict=feed_dict) self.assertAC(op_matmul_v, mat_matmul_v) def test_solve(self): self._skip_if_tests_to_skip_contains("solve") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: for adjoint in self._adjoint_options: for adjoint_arg in self._adjoint_arg_options: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) rhs = self._make_rhs(operator, adjoint=adjoint) # If adjoint_arg, solve A X = (rhs^H)^H = rhs. 
if adjoint_arg: op_solve = operator.solve( linalg.adjoint(rhs), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_solve = operator.solve( rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) mat_solve = linalg_ops.matrix_solve(mat, rhs, adjoint=adjoint) if not use_placeholder: self.assertAllEqual(op_solve.get_shape(), mat_solve.get_shape()) op_solve_v, mat_solve_v = sess.run( [op_solve, mat_solve], feed_dict=feed_dict) self.assertAC(op_solve_v, mat_solve_v) def test_trace(self): self._skip_if_tests_to_skip_contains("trace") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_trace = operator.trace() mat_trace = math_ops.trace(mat) if not use_placeholder: self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape()) op_trace_v, mat_trace_v = sess.run( [op_trace, mat_trace], feed_dict=feed_dict) self.assertAC(op_trace_v, mat_trace_v) def test_add_to_tensor(self): self._skip_if_tests_to_skip_contains("add_to_tensor") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_plus_2mat = operator.add_to_tensor(2 * mat) if not use_placeholder: self.assertAllEqual(shape, op_plus_2mat.get_shape()) op_plus_2mat_v, mat_v = sess.run( [op_plus_2mat, mat], feed_dict=feed_dict) self.assertAC(op_plus_2mat_v, 3 * mat_v) def test_diag_part(self): self._skip_if_tests_to_skip_contains("diag_part") for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with 
self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_diag_part = operator.diag_part() mat_diag_part = array_ops.matrix_diag_part(mat) if not use_placeholder: self.assertAllEqual(mat_diag_part.get_shape(), op_diag_part.get_shape()) op_diag_part_, mat_diag_part_ = sess.run( [op_diag_part, mat_diag_part], feed_dict=feed_dict) self.assertAC(op_diag_part_, mat_diag_part_) @six.add_metaclass(abc.ABCMeta) class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for square operators. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @property def _shapes_to_test(self): # non-batch operators (n, n) and batch operators. return [(0, 0), (1, 1), (1, 3, 3), (3, 4, 4), (2, 1, 4, 4)] def _make_rhs(self, operator, adjoint): # This operator is square, so rhs and x will have same shape. # adjoint value makes no difference because the operator shape doesn't # change since it is square, but be pedantic. return self._make_x(operator, adjoint=not adjoint) def _make_x(self, operator, adjoint): # Value of adjoint makes no difference because the operator is square. # Return the number of systems to solve, R, equal to 1 or 2. r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. 
if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() n = operator.domain_dimension.value x_shape = batch_shape + [n, r] else: batch_shape = operator.batch_shape_tensor() n = operator.domain_dimension_tensor() x_shape = array_ops.concat((batch_shape, [n, r]), 0) return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 @six.add_metaclass(abc.ABCMeta) class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for generic rectangular operators. Square shapes are never tested by this class, so if you want to test your operator with a square shape, create two test classes, the other subclassing SquareLinearOperatorFullMatrixTest. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @property def _tests_to_skip(self): """List of test names to skip.""" return ["solve", "det", "log_abs_det"] @property def _shapes_to_test(self): # non-batch operators (n, n) and batch operators. return [(2, 1), (1, 2), (1, 3, 2), (3, 3, 4), (2, 1, 2, 4)] def _make_rhs(self, operator, adjoint): # TODO(langmore) Add once we're testing solve_ls. raise NotImplementedError( "_make_rhs not implemented because we don't test solve") def _make_x(self, operator, adjoint): # Return the number of systems for the argument 'x' for .matmul(x) r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. 
if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() if adjoint: n = operator.range_dimension.value else: n = operator.domain_dimension.value x_shape = batch_shape + [n, r] else: batch_shape = operator.batch_shape_tensor() if adjoint: n = operator.range_dimension_tensor() else: n = operator.domain_dimension_tensor() x_shape = array_ops.concat((batch_shape, [n, r]), 0) return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False): """[batch] positive definite matrix. Args: shape: `TensorShape` or Python list. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype. force_well_conditioned: Python bool. If `True`, returned matrix has eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are chi-squared random variables. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) if not tensor_util.is_tensor(shape): shape = tensor_shape.TensorShape(shape) # Matrix must be square. shape[-1].assert_is_compatible_with(shape[-2]) with ops.name_scope("random_positive_definite_matrix"): tril = random_tril_matrix( shape, dtype, force_well_conditioned=force_well_conditioned) return math_ops.matmul(tril, tril, adjoint_b=True) def random_tril_matrix(shape, dtype, force_well_conditioned=False, remove_upper=True): """[batch] lower triangular matrix. Args: shape: `TensorShape` or Python `list`. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype force_well_conditioned: Python `bool`. If `True`, returned matrix will have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit normal random variables. remove_upper: Python `bool`. If `True`, zero out the strictly upper triangle. 
If `False`, the lower triangle of returned matrix will have desired properties, but will not have the strictly upper triangle zero'd out. Returns: `Tensor` with desired shape and dtype. """ with ops.name_scope("random_tril_matrix"): # Totally random matrix. Has no nice properties. tril = random_normal(shape, dtype=dtype) if remove_upper: tril = array_ops.matrix_band_part(tril, -1, 0) # Create a diagonal with entries having modulus in [1, 2]. if force_well_conditioned: maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype) diag = random_sign_uniform( shape[:-1], dtype=dtype, minval=1., maxval=maxval) tril = array_ops.matrix_set_diag(tril, diag) return tril def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Gaussian entries. Samples are distributed like ``` N(mean, stddev^2), if dtype is real, X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_normal"): samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) if dtype.is_complex: if seed is not None: seed += 1234 more_samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Uniform entries. Samples are distributed like ``` Uniform[minval, maxval], if dtype is real, X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. 
Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_uniform"): samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) if dtype.is_complex: if seed is not None: seed += 12345 more_samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) random entries from a "sign Uniform". Letting `Z` be a random variable equal to `-1` and `1` with equal probability, Samples from this `Op` are distributed like ``` Z * X, where X ~ Uniform[minval, maxval], if dtype is real, Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_sign_uniform"): unsigned_samples = random_uniform( shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) if seed is not None: seed += 12 signs = math_ops.sign( random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed)) return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) def random_normal_correlated_columns(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, eps=1e-4, seed=None): """Batch matrix with (possibly complex) Gaussian entries and correlated cols. 
Returns random batch matrix `A` with specified element-wise `mean`, `stddev`, living close to an embedded hyperplane. Suppose `shape[-2:] = (M, N)`. If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries. If `M >= N`, then the colums of `A` will be made almost dependent as follows: ``` L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1) B = random normal M x N-1 matrix, mean = 0, stddev = stddev. G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane E = a random normal M x N matrix, mean = 0, stddev = eps mu = a constant M x N matrix, equal to the argument "mean" A = G + E + mu ``` Args: shape: Python list of integers. Shape of the returned tensor. Must be at least length two. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype eps: Distance each column is perturbed from the low-dimensional subspace. seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. Raises: ValueError: If `shape` is not at least length 2. """ dtype = dtypes.as_dtype(dtype) if len(shape) < 2: raise ValueError( "Argument shape must be at least length 2. Found: %s" % shape) # Shape is the final shape, e.g. [..., M, N] shape = list(shape) batch_shape = shape[:-2] m, n = shape[-2:] # If there is only one column, "they" are by definition correlated. if n < 2 or n < m: return random_normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) # Shape of the matrix with only n - 1 columns that we will embed in higher # dimensional space. smaller_shape = batch_shape + [m, n - 1] # Shape of the embedding matrix, mapping batch matrices # from [..., N-1, M] to [..., N, M] embedding_mat_shape = batch_shape + [n, n - 1] # This stddev for the embedding_mat ensures final result has correct stddev. 
stddev_mat = 1 / np.sqrt(n - 1) with ops.name_scope("random_normal_correlated_columns"): smaller_mat = random_normal( smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed) if seed is not None: seed += 1287 embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed) embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True) embedded = array_ops.matrix_transpose(embedded_t) mean_mat = array_ops.ones_like(embedded) * mean return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
Mistobaan/tensorflow
tensorflow/python/ops/linalg/linear_operator_test_util.py
Python
apache-2.0
25,949
[ "Gaussian" ]
6f94f815bdd6a736b95139c646f11305f535a57a7691064d691b5d90bdd6dcfb
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from collections import defaultdict from distutils.version import LooseVersion import inspect from itertools import product from datetime import datetime, timedelta from decimal import Decimal import numpy as np import pandas as pd from pyspark.ml.linalg import SparseVector from pyspark import pandas as ps from pyspark.testing.pandasutils import ( have_tabulate, PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED, tabulate_requirement_message, ) from pyspark.testing.sqlutils import SQLTestUtils from pyspark.pandas.exceptions import PandasNotImplementedError from pyspark.pandas.missing.series import MissingPandasLikeSeries from pyspark.pandas.typedef.typehints import ( extension_dtypes, extension_dtypes_available, extension_float_dtypes_available, extension_object_dtypes_available, ) class SeriesTest(PandasOnSparkTestCase, SQLTestUtils): @property def pser(self): return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") @property def psser(self): return ps.from_pandas(self.pser) def test_series_ops(self): pser = self.pser psser = self.psser self.assert_eq(psser + 1 + 10 * psser, pser + 1 + 10 * pser) self.assert_eq(psser + 1 + 10 * psser.index, pser + 1 + 10 * pser.index) self.assert_eq(psser.index 
+ 1 + 10 * psser, pser.index + 1 + 10 * pser) def test_series_tuple_name(self): pser = self.pser pser.name = ("x", "a") psser = ps.from_pandas(pser) self.assert_eq(psser, pser) self.assert_eq(psser.name, pser.name) pser.name = ("y", "z") psser.name = ("y", "z") self.assert_eq(psser, pser) self.assert_eq(psser.name, pser.name) def test_repr_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. s = ps.range(10)["id"] s.__repr__() s.rename("a", inplace=True) self.assertEqual(s.__repr__(), s.rename("a").__repr__()) def _check_extension(self, psser, pser): if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"): self.assert_eq(psser, pser, check_exact=False) self.assertTrue(isinstance(psser.dtype, extension_dtypes)) else: self.assert_eq(psser, pser) def test_empty_series(self): pser_a = pd.Series([], dtype="i1") pser_b = pd.Series([], dtype="str") self.assert_eq(ps.from_pandas(pser_a), pser_a) psser_b = ps.from_pandas(pser_b) self.assert_eq(psser_b, pser_b) with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): self.assert_eq(ps.from_pandas(pser_a), pser_a) self.assert_eq(ps.from_pandas(pser_b), pser_b) def test_all_null_series(self): pser_a = pd.Series([None, None, None], dtype="float64") pser_b = pd.Series([None, None, None], dtype="str") self.assert_eq(ps.from_pandas(pser_a), pser_a) psser_b = ps.from_pandas(pser_b) self.assert_eq(psser_b, pser_b) with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): self.assert_eq(ps.from_pandas(pser_a), pser_a) self.assert_eq(ps.from_pandas(pser_b), pser_b) def test_head(self): psser = self.psser pser = self.pser self.assert_eq(psser.head(3), pser.head(3)) self.assert_eq(psser.head(0), pser.head(0)) self.assert_eq(psser.head(-3), pser.head(-3)) self.assert_eq(psser.head(-10), pser.head(-10)) def test_last(self): with self.assertRaises(TypeError): self.psser.last("1D") index = pd.date_range("2018-04-09", periods=4, freq="2D") pser = pd.Series([1, 2, 3, 4], index=index) 
psser = ps.from_pandas(pser) self.assert_eq(psser.last("1D"), pser.last("1D")) def test_first(self): with self.assertRaises(TypeError): self.psser.first("1D") index = pd.date_range("2018-04-09", periods=4, freq="2D") pser = pd.Series([1, 2, 3, 4], index=index) psser = ps.from_pandas(pser) self.assert_eq(psser.first("1D"), pser.first("1D")) def test_rename(self): pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") psser = ps.from_pandas(pser) pser.name = "renamed" psser.name = "renamed" self.assertEqual(psser.name, "renamed") self.assert_eq(psser, pser) pser.name = None psser.name = None self.assertEqual(psser.name, None) self.assert_eq(psser, pser) pidx = pser.index psidx = psser.index pidx.name = "renamed" psidx.name = "renamed" self.assertEqual(psidx.name, "renamed") self.assert_eq(psidx, pidx) expected_error_message = "Series.name must be a hashable type" with self.assertRaisesRegex(TypeError, expected_error_message): psser.name = ["renamed"] with self.assertRaisesRegex(TypeError, expected_error_message): psser.name = ["0", "1"] with self.assertRaisesRegex(TypeError, expected_error_message): ps.Series([1, 2, 3], name=["0", "1"]) def test_rename_method(self): # Series name pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") psser = ps.from_pandas(pser) self.assert_eq(psser.rename("y"), pser.rename("y")) self.assertEqual(psser.name, "x") # no mutation self.assert_eq(psser.rename(), pser.rename()) self.assert_eq((psser.rename("y") + 1).head(), (pser.rename("y") + 1).head()) psser.rename("z", inplace=True) pser.rename("z", inplace=True) self.assertEqual(psser.name, "z") self.assert_eq(psser, pser) expected_error_message = "Series.name must be a hashable type" with self.assertRaisesRegex(TypeError, expected_error_message): psser.rename(["0", "1"]) # Series index # pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x') # psser = ps.from_pandas(s) # TODO: index # res = psser.rename(lambda x: x ** 2) # self.assert_eq(res, pser.rename(lambda x: x ** 2)) # res = 
psser.rename(pser) # self.assert_eq(res, pser.rename(pser)) # res = psser.rename(psser) # self.assert_eq(res, pser.rename(pser)) # res = psser.rename(lambda x: x**2, inplace=True) # self.assertis(res, psser) # s.rename(lambda x: x**2, inplace=True) # self.assert_eq(psser, pser) def test_rename_axis(self): index = pd.Index(["A", "B", "C"], name="index") pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name") psser = ps.from_pandas(pser) self.assert_eq( pser.rename_axis("index2").sort_index(), psser.rename_axis("index2").sort_index(), ) self.assert_eq( (pser + 1).rename_axis("index2").sort_index(), (psser + 1).rename_axis("index2").sort_index(), ) pser2 = pser.copy() psser2 = psser.copy() pser2.rename_axis("index2", inplace=True) psser2.rename_axis("index2", inplace=True) self.assert_eq(pser2.sort_index(), psser2.sort_index()) self.assertRaises(ValueError, lambda: psser.rename_axis(["index2", "index3"])) self.assertRaises(TypeError, lambda: psser.rename_axis(mapper=["index2"], index=["index3"])) # index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0 if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"): self.assert_eq( pser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(), psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(), ) self.assert_eq( pser.rename_axis(index=str.upper).sort_index(), psser.rename_axis(index=str.upper).sort_index(), ) else: expected = psser expected.index.name = "index2" result = psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index() self.assert_eq(expected, result) expected = psser expected.index.name = "INDEX" result = psser.rename_axis(index=str.upper).sort_index() self.assert_eq(expected, result) index = pd.MultiIndex.from_tuples( [("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"] ) pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name") psser = ps.from_pandas(pser) self.assert_eq( pser.rename_axis(["index3", 
"index4"]).sort_index(), psser.rename_axis(["index3", "index4"]).sort_index(), ) self.assertRaises(ValueError, lambda: psser.rename_axis(["index3", "index4", "index5"])) # index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0 if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"): self.assert_eq( pser.rename_axis( index={"index1": "index3", "index2": "index4", "missing": "index5"} ).sort_index(), psser.rename_axis( index={"index1": "index3", "index2": "index4", "missing": "index5"} ).sort_index(), ) self.assert_eq( pser.rename_axis(index=str.upper).sort_index(), psser.rename_axis(index=str.upper).sort_index(), ) else: expected = psser expected.index.names = ["index3", "index4"] result = psser.rename_axis( index={"index1": "index3", "index2": "index4", "missing": "index5"} ).sort_index() self.assert_eq(expected, result) expected.index.names = ["INDEX1", "INDEX2"] result = psser.rename_axis(index=str.upper).sort_index() self.assert_eq(expected, result) def test_or(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"]) self.assert_eq(psdf["left"] | True, pdf["left"] | True) self.assert_eq(psdf["left"] | False, pdf["left"] | False) self.assert_eq(psdf["left"] | None, pdf["left"] | None) self.assert_eq(True | psdf["right"], True | pdf["right"]) self.assert_eq(False | psdf["right"], False | pdf["right"]) self.assert_eq(None | psdf["right"], None | pdf["right"]) @unittest.skipIf( not extension_object_dtypes_available, "pandas extension object dtypes are not available" ) def test_or_extenstion_dtypes(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ).astype("boolean") psdf = 
ps.from_pandas(pdf) self._check_extension(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"]) self._check_extension(psdf["left"] | True, pdf["left"] | True) self._check_extension(psdf["left"] | False, pdf["left"] | False) self._check_extension(psdf["left"] | pd.NA, pdf["left"] | pd.NA) self._check_extension(True | psdf["right"], True | pdf["right"]) self._check_extension(False | psdf["right"], False | pdf["right"]) self._check_extension(pd.NA | psdf["right"], pd.NA | pdf["right"]) def test_and(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"]) self.assert_eq(psdf["left"] & True, pdf["left"] & True) self.assert_eq(psdf["left"] & False, pdf["left"] & False) self.assert_eq(psdf["left"] & None, pdf["left"] & None) self.assert_eq(True & psdf["right"], True & pdf["right"]) self.assert_eq(False & psdf["right"], False & pdf["right"]) self.assert_eq(None & psdf["right"], None & pdf["right"]) @unittest.skipIf( not extension_object_dtypes_available, "pandas extension object dtypes are not available" ) def test_and_extenstion_dtypes(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ).astype("boolean") psdf = ps.from_pandas(pdf) self._check_extension(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"]) self._check_extension(psdf["left"] & True, pdf["left"] & True) self._check_extension(psdf["left"] & False, pdf["left"] & False) self._check_extension(psdf["left"] & pd.NA, pdf["left"] & pd.NA) self._check_extension(True & psdf["right"], True & pdf["right"]) self._check_extension(False & psdf["right"], False & pdf["right"]) self._check_extension(pd.NA & psdf["right"], pd.NA & pdf["right"]) def test_to_numpy(self): pser 
= pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") psser = ps.from_pandas(pser) self.assert_eq(psser.to_numpy(), pser.values) def test_isin(self): pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal") psser = ps.from_pandas(pser) self.assert_eq(psser.isin(["cow", "lama"]), pser.isin(["cow", "lama"])) self.assert_eq(psser.isin(np.array(["cow", "lama"])), pser.isin(np.array(["cow", "lama"]))) self.assert_eq(psser.isin({"cow"}), pser.isin({"cow"})) pser = pd.Series([np.int64(1), np.int32(1), 1]) psser = ps.from_pandas(pser) self.assert_eq(psser.isin([np.int64(1)]), pser.isin([np.int64(1)])) msg = "only list-like objects are allowed to be passed to isin()" with self.assertRaisesRegex(TypeError, msg): psser.isin(1) # when Series have NaN pser = pd.Series(["lama", "cow", None, "lama", "beetle", "lama", "hippo", None], name="a") psser = ps.from_pandas(pser) self.assert_eq(psser.isin(["cow", "lama"]), pser.isin(["cow", "lama"])) pser = pd.Series([None, 5, None, 3, 2, 1, None, 0, 0], name="a") psser = ps.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.2"): self.assert_eq(psser.isin([1, 5, 0, None]), pser.isin([1, 5, 0, None])) else: expected = pd.Series( [False, True, False, False, False, True, False, True, True], name="a" ) self.assert_eq(psser.isin([1, 5, 0, None]), expected) def test_drop_duplicates(self): pdf = pd.DataFrame({"animal": ["lama", "cow", "lama", "beetle", "lama", "hippo"]}) psdf = ps.from_pandas(pdf) pser = pdf.animal psser = psdf.animal self.assert_eq(psser.drop_duplicates().sort_index(), pser.drop_duplicates().sort_index()) self.assert_eq( psser.drop_duplicates(keep="last").sort_index(), pser.drop_duplicates(keep="last").sort_index(), ) # inplace psser.drop_duplicates(keep=False, inplace=True) pser.drop_duplicates(keep=False, inplace=True) self.assert_eq(psser.sort_index(), pser.sort_index()) self.assert_eq(psdf, pdf) def test_reindex(self): index = ["A", "B", "C", "D", "E"] pser = pd.Series([1.0, 2.0, 3.0, 
4.0, None], index=index, name="x") psser = ps.from_pandas(pser) self.assert_eq(pser, psser) self.assert_eq( pser.reindex(["A", "B"]).sort_index(), psser.reindex(["A", "B"]).sort_index(), ) self.assert_eq( pser.reindex(["A", "B", "2", "3"]).sort_index(), psser.reindex(["A", "B", "2", "3"]).sort_index(), ) self.assert_eq( pser.reindex(["A", "E", "2"], fill_value=0).sort_index(), psser.reindex(["A", "E", "2"], fill_value=0).sort_index(), ) self.assertRaises(TypeError, lambda: psser.reindex(index=123)) def test_reindex_like(self): data = [1.0, 2.0, None] index = pd.Index(["A", "B", "C"], name="index1") pser = pd.Series(data=data, index=index, name="name1") psser = ps.from_pandas(pser) # Reindexing single Index on single Index data2 = [3.0, None, 4.0] index2 = pd.Index(["A", "C", "D"], name="index2") pser2 = pd.Series(data=data2, index=index2, name="name2") psser2 = ps.from_pandas(pser2) self.assert_eq( pser.reindex_like(pser2).sort_index(), psser.reindex_like(psser2).sort_index(), ) self.assert_eq( (pser + 1).reindex_like(pser2).sort_index(), (psser + 1).reindex_like(psser2).sort_index(), ) # Reindexing MultiIndex on single Index index2 = pd.MultiIndex.from_tuples( [("A", "G"), ("C", "D"), ("I", "J")], names=["index3", "index4"] ) pser2 = pd.Series(data=data2, index=index2, name="name2") psser2 = ps.from_pandas(pser2) self.assert_eq( pser.reindex_like(pser2).sort_index(), psser.reindex_like(psser2).sort_index(), ) self.assertRaises(TypeError, lambda: psser.reindex_like(index2)) self.assertRaises(AssertionError, lambda: psser2.reindex_like(psser)) # Reindexing MultiIndex on MultiIndex index = pd.MultiIndex.from_tuples( [("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"] ) pser = pd.Series(data=data, index=index, name="name1") psser = ps.from_pandas(pser) self.assert_eq( pser.reindex_like(pser2).sort_index(), psser.reindex_like(psser2).sort_index(), ) # Reindexing with DataFrame index2 = pd.MultiIndex.from_tuples( [("A", "B"), ("C", "D"), ("E", "F")], 
names=["name3", "name4"] ) pdf = pd.DataFrame(data=data, index=index2) psdf = ps.from_pandas(pdf) self.assert_eq( pser.reindex_like(pdf).sort_index(), psser.reindex_like(psdf).sort_index(), ) def test_fillna(self): pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]}) psdf = ps.from_pandas(pdf) pser = pdf.x psser = psdf.x self.assert_eq(psser.fillna(0), pser.fillna(0)) self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0)) psser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(psser, pser) self.assert_eq(psdf, pdf) # test considering series does not have NA/NaN values psser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(psser, pser) psser = psdf.x.rename("y") pser = pdf.x.rename("y") psser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(psser.head(), pser.head()) pser = pd.Series([1, 2, 3, 4, 5, 6], name="x") psser = ps.from_pandas(pser) pser.loc[3] = np.nan psser.loc[3] = np.nan self.assert_eq(psser.fillna(0), pser.fillna(0)) self.assert_eq(psser.fillna(method="ffill"), pser.fillna(method="ffill")) self.assert_eq(psser.fillna(method="bfill"), pser.fillna(method="bfill")) # inplace fillna on non-nullable column pdf = pd.DataFrame({"a": [1, 2, None], "b": [1, 2, 3]}) psdf = ps.from_pandas(pdf) pser = pdf.b psser = psdf.b self.assert_eq(psser.fillna(0), pser.fillna(0)) self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0)) psser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(psser, pser) self.assert_eq(psdf, pdf) with self.assertRaisesRegex( ValueError, "Must specify a fillna 'value' or 'method' parameter." 
): psser.fillna() def test_dropna(self): pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6]}) psdf = ps.from_pandas(pdf) pser = pdf.x psser = psdf.x self.assert_eq(psser.dropna(), pser.dropna()) pser.dropna(inplace=True) psser.dropna(inplace=True) self.assert_eq(psser, pser) self.assert_eq(psdf, pdf) def test_nunique(self): pser = pd.Series([1, 2, 1, np.nan]) psser = ps.from_pandas(pser) # Assert NaNs are dropped by default nunique_result = psser.nunique() self.assertEqual(nunique_result, 2) self.assert_eq(nunique_result, pser.nunique()) # Assert including NaN values nunique_result = psser.nunique(dropna=False) self.assertEqual(nunique_result, 3) self.assert_eq(nunique_result, pser.nunique(dropna=False)) # Assert approximate counts self.assertEqual(ps.Series(range(100)).nunique(approx=True), 103) self.assertEqual(ps.Series(range(100)).nunique(approx=True, rsd=0.01), 100) def test_value_counts(self): # this is also containing test for Index & MultiIndex pser = pd.Series( [1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3], index=[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3], name="x", ) psser = ps.from_pandas(pser) exp = pser.value_counts() res = psser.value_counts() self.assertEqual(res.name, exp.name) self.assert_eq(res, exp) self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True)) self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True)) self.assert_eq( psser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), ) self.assert_eq( psser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), ) self.assert_eq( psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True) ) self.assert_eq( psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True) ) self.assert_eq( psser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, 
dropna=False), ) self.assert_eq( psser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), ) with self.assertRaisesRegex( NotImplementedError, "value_counts currently does not support bins" ): psser.value_counts(bins=3) pser.name = "index" psser.name = "index" self.assert_eq(psser.value_counts(), pser.value_counts()) # Series from DataFrame pdf = pd.DataFrame({"a": [2, 2, 3], "b": [None, 1, None]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True)) self.assert_eq(psdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True)) self.assert_eq( psdf.a.value_counts(normalize=True, dropna=False), pdf.a.value_counts(normalize=True, dropna=False), ) self.assert_eq( psdf.a.value_counts(ascending=True, dropna=False), pdf.a.value_counts(ascending=True, dropna=False), ) self.assert_eq( psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True) ) self.assert_eq( psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True) ) self.assert_eq( psser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), ) self.assert_eq( psser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), ) # Series with NaN index pser = pd.Series([3, 2, 3, 1, 2, 3], index=[2.0, None, 5.0, 5.0, None, 5.0]) psser = ps.from_pandas(pser) self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True)) self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True)) self.assert_eq( psser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), ) self.assert_eq( psser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), ) self.assert_eq( psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True) ) 
self.assert_eq( psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True) ) self.assert_eq( psser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), ) self.assert_eq( psser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), ) # Series with MultiIndex pser.index = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")] ) psser = ps.from_pandas(pser) self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True)) self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True)) self.assert_eq( psser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), ) self.assert_eq( psser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), ) # FIXME: MultiIndex.value_counts returns wrong indices. self.assert_eq( psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( psser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( psser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex some of index has NaN pser.index = pd.MultiIndex.from_tuples( [("x", "a"), ("x", None), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")] ) psser = ps.from_pandas(pser) self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True)) self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True)) self.assert_eq( psser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), ) self.assert_eq( 
psser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), ) # FIXME: MultiIndex.value_counts returns wrong indices. self.assert_eq( psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( psser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( psser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex some of index is NaN. # This test only available for pandas >= 0.24. if LooseVersion(pd.__version__) >= LooseVersion("0.24"): pser.index = pd.MultiIndex.from_tuples( [("x", "a"), None, ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")] ) psser = ps.from_pandas(pser) self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True)) self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True)) self.assert_eq( psser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), ) self.assert_eq( psser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), ) # FIXME: MultiIndex.value_counts returns wrong indices. 
self.assert_eq( psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( psser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( psser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) def test_nsmallest(self): sample_lst = [1, 2, 3, 4, np.nan, 6] pser = pd.Series(sample_lst, name="x") psser = ps.Series(sample_lst, name="x") self.assert_eq(psser.nsmallest(n=3), pser.nsmallest(n=3)) self.assert_eq(psser.nsmallest(), pser.nsmallest()) self.assert_eq((psser + 1).nsmallest(), (pser + 1).nsmallest()) def test_nlargest(self): sample_lst = [1, 2, 3, 4, np.nan, 6] pser = pd.Series(sample_lst, name="x") psser = ps.Series(sample_lst, name="x") self.assert_eq(psser.nlargest(n=3), pser.nlargest(n=3)) self.assert_eq(psser.nlargest(), pser.nlargest()) self.assert_eq((psser + 1).nlargest(), (pser + 1).nlargest()) def test_notnull(self): pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x") psser = ps.from_pandas(pser) self.assert_eq(psser.notnull(), pser.notnull()) pser = self.pser psser = self.psser self.assert_eq(psser.notnull(), pser.notnull()) def test_all(self): for pser in [ pd.Series([True, True], name="x"), pd.Series([True, False], name="x"), pd.Series([0, 1], name="x"), pd.Series([1, 2, 3], name="x"), pd.Series([True, True, None], name="x"), pd.Series([True, False, None], name="x"), pd.Series([], name="x"), pd.Series([np.nan], name="x"), ]: psser = ps.from_pandas(pser) self.assert_eq(psser.all(), pser.all()) pser = pd.Series([1, 2, 3, 4], name="x") psser = ps.from_pandas(pser) self.assert_eq((psser % 2 == 0).all(), (pser % 2 == 0).all()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' 
): psser.all(axis=1) def test_any(self): for pser in [ pd.Series([False, False], name="x"), pd.Series([True, False], name="x"), pd.Series([0, 1], name="x"), pd.Series([1, 2, 3], name="x"), pd.Series([True, True, None], name="x"), pd.Series([True, False, None], name="x"), pd.Series([], name="x"), pd.Series([np.nan], name="x"), ]: psser = ps.from_pandas(pser) self.assert_eq(psser.any(), pser.any()) pser = pd.Series([1, 2, 3, 4], name="x") psser = ps.from_pandas(pser) self.assert_eq((psser % 2 == 0).any(), (pser % 2 == 0).any()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' ): psser.any(axis=1) def test_reset_index(self): pdf = pd.DataFrame({"foo": [1, 2, 3, 4]}, index=pd.Index(["a", "b", "c", "d"], name="idx")) psdf = ps.from_pandas(pdf) pser = pdf.foo psser = psdf.foo self.assert_eq(psser.reset_index(), pser.reset_index()) self.assert_eq(psser.reset_index(name="values"), pser.reset_index(name="values")) self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True)) # inplace psser.reset_index(drop=True, inplace=True) pser.reset_index(drop=True, inplace=True) self.assert_eq(psser, pser) self.assert_eq(psdf, pdf) def test_reset_index_with_default_index_types(self): pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3)) psser = ps.from_pandas(pser) with ps.option_context("compute.default_index_type", "sequence"): self.assert_eq(psser.reset_index(), pser.reset_index()) with ps.option_context("compute.default_index_type", "distributed-sequence"): # the order might be changed. self.assert_eq(psser.reset_index().sort_index(), pser.reset_index()) with ps.option_context("compute.default_index_type", "distributed"): # the index is different. 
self.assert_eq( psser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index() ) def test_index_to_series_reset_index(self): def check(psser, pser): self.assert_eq(psser.reset_index(), pser.reset_index()) self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True)) pser.reset_index(drop=True, inplace=True) psser.reset_index(drop=True, inplace=True) self.assert_eq(psser, pser) pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]}, index=np.random.rand(9), ) psdf = ps.from_pandas(pdf) check(psdf.index.to_series(), pdf.index.to_series()) check(psdf.index.to_series(name="a"), pdf.index.to_series(name="a")) check(psdf.index.to_series(name=("x", "a")), pdf.index.to_series(name=("x", "a"))) def test_sort_values(self): pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, None, 7]}) psdf = ps.from_pandas(pdf) pser = pdf.x psser = psdf.x self.assert_eq(psser.sort_values(), pser.sort_values()) self.assert_eq(psser.sort_values(ascending=False), pser.sort_values(ascending=False)) self.assert_eq( psser.sort_values(na_position="first"), pser.sort_values(na_position="first") ) self.assertRaises(ValueError, lambda: psser.sort_values(na_position="invalid")) # inplace # pandas raises an exception when the Series is derived from DataFrame psser.sort_values(inplace=True) self.assert_eq(psser, pser.sort_values()) self.assert_eq(psdf, pdf) pser = pdf.x.copy() psser = psdf.x.copy() psser.sort_values(inplace=True) pser.sort_values(inplace=True) self.assert_eq(psser, pser) self.assert_eq(psdf, pdf) def test_sort_index(self): pdf = pd.DataFrame({"x": [2, 1, np.nan]}, index=["b", "a", np.nan]) psdf = ps.from_pandas(pdf) pser = pdf.x psser = psdf.x # Assert invalid parameters self.assertRaises(NotImplementedError, lambda: psser.sort_index(axis=1)) self.assertRaises(NotImplementedError, lambda: psser.sort_index(kind="mergesort")) self.assertRaises(ValueError, lambda: psser.sort_index(na_position="invalid")) # Assert default behavior without 
parameters self.assert_eq(psser.sort_index(), pser.sort_index()) # Assert sorting descending self.assert_eq(psser.sort_index(ascending=False), pser.sort_index(ascending=False)) # Assert sorting NA indices first self.assert_eq(psser.sort_index(na_position="first"), pser.sort_index(na_position="first")) # Assert sorting inplace # pandas sorts pdf.x by the index and update the column only # when the Series is derived from DataFrame. psser.sort_index(inplace=True) self.assert_eq(psser, pser.sort_index()) self.assert_eq(psdf, pdf) pser = pdf.x.copy() psser = psdf.x.copy() psser.sort_index(inplace=True) pser.sort_index(inplace=True) self.assert_eq(psser, pser) self.assert_eq(psdf, pdf) # Assert multi-indices pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0") psser = ps.from_pandas(pser) self.assert_eq(psser.sort_index(), pser.sort_index()) self.assert_eq(psser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0])) self.assert_eq(psser.reset_index().sort_index(), pser.reset_index().sort_index()) def test_to_datetime(self): pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100) psser = ps.from_pandas(pser) self.assert_eq( pd.to_datetime(pser, infer_datetime_format=True), ps.to_datetime(psser, infer_datetime_format=True), ) def test_missing(self): psser = self.psser missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction) unsupported_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Series.*{}.*not implemented( yet\\.|\\. 
.+)".format(name), ): getattr(psser, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name) ): getattr(psser, name)() missing_properties = inspect.getmembers( MissingPandasLikeSeries, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(psser, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name) ): getattr(psser, name) def test_clip(self): pser = pd.Series([0, 2, 4], index=np.random.rand(3)) psser = ps.from_pandas(pser) # Assert list-like values are not accepted for 'lower' and 'upper' msg = "List-like value are not supported for 'lower' and 'upper' at the moment" with self.assertRaises(TypeError, msg=msg): psser.clip(lower=[1]) with self.assertRaises(TypeError, msg=msg): psser.clip(upper=[1]) # Assert no lower or upper self.assert_eq(psser.clip(), pser.clip()) # Assert lower only self.assert_eq(psser.clip(1), pser.clip(1)) # Assert upper only self.assert_eq(psser.clip(upper=3), pser.clip(upper=3)) # Assert lower and upper self.assert_eq(psser.clip(1, 3), pser.clip(1, 3)) # Assert behavior on string values str_psser = ps.Series(["a", "b", "c"]) self.assert_eq(str_psser.clip(1, 3), str_psser) def test_compare(self): if LooseVersion(pd.__version__) >= LooseVersion("1.1"): pser = pd.Series([1, 2]) psser = ps.from_pandas(pser) res_psdf = psser.compare(psser) 
self.assertTrue(res_psdf.empty) self.assert_eq(res_psdf.columns, pd.Index(["self", "other"])) self.assert_eq( pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index() ) pser = pd.Series([1, 2], index=["x", "y"]) psser = ps.from_pandas(pser) self.assert_eq( pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index() ) else: psser = ps.Series([1, 2]) res_psdf = psser.compare(psser) self.assertTrue(res_psdf.empty) self.assert_eq(res_psdf.columns, pd.Index(["self", "other"])) expected = ps.DataFrame([[1, 2], [2, 3]], columns=["self", "other"]) self.assert_eq(expected, psser.compare(psser + 1).sort_index()) psser = ps.Series([1, 2], index=["x", "y"]) expected = ps.DataFrame([[1, 2], [2, 3]], index=["x", "y"], columns=["self", "other"]) self.assert_eq(expected, psser.compare(psser + 1).sort_index()) def test_is_unique(self): # We can't use pandas' is_unique for comparison. pandas 0.23 ignores None pser = pd.Series([1, 2, 2, None, None]) psser = ps.from_pandas(pser) self.assertEqual(False, psser.is_unique) self.assertEqual(False, (psser + 1).is_unique) pser = pd.Series([1, None, None]) psser = ps.from_pandas(pser) self.assertEqual(False, psser.is_unique) self.assertEqual(False, (psser + 1).is_unique) pser = pd.Series([1]) psser = ps.from_pandas(pser) self.assertEqual(pser.is_unique, psser.is_unique) self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique) pser = pd.Series([1, 1, 1]) psser = ps.from_pandas(pser) self.assertEqual(pser.is_unique, psser.is_unique) self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique) def test_to_list(self): self.assert_eq(self.psser.tolist(), self.pser.tolist()) def test_append(self): pser1 = pd.Series([1, 2, 3], name="0") pser2 = pd.Series([4, 5, 6], name="0") pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0") psser1 = ps.from_pandas(pser1) psser2 = ps.from_pandas(pser2) psser3 = ps.from_pandas(pser3) self.assert_eq(psser1.append(psser2), pser1.append(pser2)) 
self.assert_eq(psser1.append(psser3), pser1.append(pser3)) self.assert_eq( psser1.append(psser2, ignore_index=True), pser1.append(pser2, ignore_index=True) ) psser1.append(psser3, verify_integrity=True) msg = "Indices have overlapping values" with self.assertRaises(ValueError, msg=msg): psser1.append(psser2, verify_integrity=True) def test_map(self): pser = pd.Series(["cat", "dog", None, "rabbit"]) psser = ps.from_pandas(pser) # Currently Koalas doesn't return NaN as pandas does. self.assert_eq(psser.map({}), pser.map({}).replace({pd.np.nan: None})) d = defaultdict(lambda: "abc") self.assertTrue("abc" in repr(psser.map(d))) self.assert_eq(psser.map(d), pser.map(d)) def tomorrow(date) -> datetime: return date + timedelta(days=1) pser = pd.Series([datetime(2019, 10, 24)]) psser = ps.from_pandas(pser) self.assert_eq(psser.map(tomorrow), pser.map(tomorrow)) def test_add_prefix(self): pser = pd.Series([1, 2, 3, 4], name="0") psser = ps.from_pandas(pser) self.assert_eq(pser.add_prefix("item_"), psser.add_prefix("item_")) pser = pd.Series( [1, 2, 3], name="0", index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]), ) psser = ps.from_pandas(pser) self.assert_eq(pser.add_prefix("item_"), psser.add_prefix("item_")) def test_add_suffix(self): pser = pd.Series([1, 2, 3, 4], name="0") psser = ps.from_pandas(pser) self.assert_eq(pser.add_suffix("_item"), psser.add_suffix("_item")) pser = pd.Series( [1, 2, 3], name="0", index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]), ) psser = ps.from_pandas(pser) self.assert_eq(pser.add_suffix("_item"), psser.add_suffix("_item")) def test_cummin(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]) psser = ps.from_pandas(pser) self.assert_eq(pser.cummin(), psser.cummin()) self.assert_eq(pser.cummin(skipna=False), psser.cummin(skipna=False)) self.assert_eq(pser.cummin().sum(), psser.cummin().sum()) # with reversed index pser.index = [4, 3, 2, 1, 0] psser = ps.from_pandas(pser) self.assert_eq(pser.cummin(), 
psser.cummin()) self.assert_eq(pser.cummin(skipna=False), psser.cummin(skipna=False)) def test_cummax(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]) psser = ps.from_pandas(pser) self.assert_eq(pser.cummax(), psser.cummax()) self.assert_eq(pser.cummax(skipna=False), psser.cummax(skipna=False)) self.assert_eq(pser.cummax().sum(), psser.cummax().sum()) # with reversed index pser.index = [4, 3, 2, 1, 0] psser = ps.from_pandas(pser) self.assert_eq(pser.cummax(), psser.cummax()) self.assert_eq(pser.cummax(skipna=False), psser.cummax(skipna=False)) def test_cumsum(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumsum(), psser.cumsum()) self.assert_eq(pser.cumsum(skipna=False), psser.cumsum(skipna=False)) self.assert_eq(pser.cumsum().sum(), psser.cumsum().sum()) # with reversed index pser.index = [4, 3, 2, 1, 0] psser = ps.from_pandas(pser) self.assert_eq(pser.cumsum(), psser.cumsum()) self.assert_eq(pser.cumsum(skipna=False), psser.cumsum(skipna=False)) # bool pser = pd.Series([True, True, False, True]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumsum().astype(int), psser.cumsum()) self.assert_eq(pser.cumsum(skipna=False).astype(int), psser.cumsum(skipna=False)) with self.assertRaisesRegex(TypeError, r"Could not convert object \(string\) to numeric"): ps.Series(["a", "b", "c", "d"]).cumsum() def test_cumprod(self): pser = pd.Series([1.0, None, 1.0, 4.0, 9.0]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumprod(), psser.cumprod()) self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False)) self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum()) # with integer type pser = pd.Series([1, 10, 1, 4, 9]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumprod(), psser.cumprod()) self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False)) self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum()) # with reversed index pser.index = [4, 3, 2, 1, 0] psser = ps.from_pandas(pser) 
self.assert_eq(pser.cumprod(), psser.cumprod()) self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False)) # including zero pser = pd.Series([1, 2, 0, 3]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumprod(), psser.cumprod()) self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False)) # including negative values pser = pd.Series([1, -1, -2]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumprod(), psser.cumprod()) self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False)) # bool pser = pd.Series([True, True, False, True]) psser = ps.from_pandas(pser) self.assert_eq(pser.cumprod(), psser.cumprod()) self.assert_eq(pser.cumprod(skipna=False).astype(int), psser.cumprod(skipna=False)) with self.assertRaisesRegex(TypeError, r"Could not convert object \(string\) to numeric"): ps.Series(["a", "b", "c", "d"]).cumprod() def test_median(self): with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"): ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a") def test_rank(self): pser = pd.Series([1, 2, 3, 1], name="x") psser = ps.from_pandas(pser) self.assert_eq(pser.rank(), psser.rank().sort_index()) self.assert_eq(pser.rank().sum(), psser.rank().sum()) self.assert_eq(pser.rank(ascending=False), psser.rank(ascending=False).sort_index()) self.assert_eq(pser.rank(method="min"), psser.rank(method="min").sort_index()) self.assert_eq(pser.rank(method="max"), psser.rank(method="max").sort_index()) self.assert_eq(pser.rank(method="first"), psser.rank(method="first").sort_index()) self.assert_eq(pser.rank(method="dense"), psser.rank(method="dense").sort_index()) msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" with self.assertRaisesRegex(ValueError, msg): psser.rank(method="nothing") msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" with self.assertRaisesRegex(ValueError, msg): psser.rank(method="nothing") midx = pd.MultiIndex.from_tuples([("a", "b"), ("a", "c"), 
("b", "c"), ("c", "d")]) pser.index = midx psser = ps.from_pandas(pser) msg = "rank do not support MultiIndex now" with self.assertRaisesRegex(NotImplementedError, msg): psser.rank(method="min") def test_round(self): pser = pd.Series([0.028208, 0.038683, 0.877076], name="x") psser = ps.from_pandas(pser) self.assert_eq(pser.round(2), psser.round(2)) msg = "decimals must be an integer" with self.assertRaisesRegex(TypeError, msg): psser.round(1.5) def test_quantile(self): pser = pd.Series([]) psser = ps.from_pandas(pser) self.assert_eq(psser.quantile(0.5), pser.quantile(0.5)) self.assert_eq(psser.quantile([0.25, 0.5, 0.75]), pser.quantile([0.25, 0.5, 0.75])) with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"): ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a") with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"): ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a") with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"): ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=["a"]) with self.assertRaisesRegex( ValueError, "percentiles should all be in the interval \\[0, 1\\]" ): ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=1.1) with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"): ps.Series(["a", "b", "c"]).quantile() with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"): ps.Series(["a", "b", "c"]).quantile([0.25, 0.5, 0.75]) def test_idxmax(self): pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"]) psser = ps.Series(pser) self.assertEqual(psser.idxmax(), pser.idxmax()) self.assertEqual(psser.idxmax(skipna=False), pser.idxmax(skipna=False)) index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second") ) pser = pd.Series(data=[1, 2, 4, 5], index=index) psser = ps.Series(pser) self.assertEqual(psser.idxmax(), pser.idxmax()) 
        self.assertEqual(psser.idxmax(skipna=False), pser.idxmax(skipna=False))

        # Empty Series: idxmax must raise like pandas does.
        psser = ps.Series([])
        with self.assertRaisesRegex(ValueError, "an empty sequence"):
            psser.idxmax()

        # Ties and a missing value; with skipna=False the result is NaN-like,
        # so compare the repr rather than the value itself.
        pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
        psser = ps.Series(pser)
        self.assertEqual(psser.idxmax(), pser.idxmax())
        self.assertEqual(repr(psser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False)))

    def test_idxmin(self):
        """Series.idxmin parity: plain index, MultiIndex, empty input, ties/NaN."""
        pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
        psser = ps.Series(pser)
        self.assertEqual(psser.idxmin(), pser.idxmin())
        self.assertEqual(psser.idxmin(skipna=False), pser.idxmin(skipna=False))

        # MultiIndex: idxmin returns the full index tuple.
        index = pd.MultiIndex.from_arrays(
            [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
        )
        pser = pd.Series(data=[1, 2, 4, 5], index=index)
        psser = ps.Series(pser)
        self.assertEqual(psser.idxmin(), pser.idxmin())
        self.assertEqual(psser.idxmin(skipna=False), pser.idxmin(skipna=False))

        # Empty Series: idxmin must raise like pandas does.
        psser = ps.Series([])
        with self.assertRaisesRegex(ValueError, "an empty sequence"):
            psser.idxmin()

        # Ties and a missing value; compare reprs for the NaN-ish result.
        pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
        psser = ps.Series(pser)
        self.assertEqual(psser.idxmin(), pser.idxmin())
        self.assertEqual(repr(psser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False)))

    def test_shift(self):
        """Series.shift parity, including fill_value on pandas >= 0.24.2."""
        pser = pd.Series([10, 20, 15, 30, 45], name="x")
        psser = ps.Series(pser)
        self.assert_eq(psser.shift(2), pser.shift(2))
        self.assert_eq(psser.shift().shift(-1), pser.shift().shift(-1))
        self.assert_eq(psser.shift().sum(), pser.shift().sum())

        # `fill_value` was added to pandas in 0.24.2; skip it on older versions.
        if LooseVersion(pd.__version__) < LooseVersion("0.24.2"):
            self.assert_eq(psser.shift(periods=2), pser.shift(periods=2))
        else:
            self.assert_eq(
                psser.shift(periods=2, fill_value=0), pser.shift(periods=2, fill_value=0)
            )
        # Non-integer periods is rejected with a TypeError.
        with self.assertRaisesRegex(TypeError, "periods should be an int; however"):
            psser.shift(periods=1.5)

    def test_diff(self):
        """Series.diff parity with pandas."""
        pser = pd.Series([10, 20, 15, 30, 45], name="x")
        psser = ps.Series(pser)
        self.assert_eq(psser.diff(2), pser.diff(2))
self.assert_eq(psser.diff().diff(-1), pser.diff().diff(-1)) self.assert_eq(psser.diff().sum(), pser.diff().sum()) def _test_numeric_astype(self, pser): psser = ps.Series(pser) self.assert_eq(psser.astype(int), pser.astype(int)) self.assert_eq(psser.astype(np.int), pser.astype(np.int)) self.assert_eq(psser.astype(np.int8), pser.astype(np.int8)) self.assert_eq(psser.astype(np.int16), pser.astype(np.int16)) self.assert_eq(psser.astype(np.int32), pser.astype(np.int32)) self.assert_eq(psser.astype(np.int64), pser.astype(np.int64)) self.assert_eq(psser.astype(np.byte), pser.astype(np.byte)) self.assert_eq(psser.astype("int"), pser.astype("int")) self.assert_eq(psser.astype("int8"), pser.astype("int8")) self.assert_eq(psser.astype("int16"), pser.astype("int16")) self.assert_eq(psser.astype("int32"), pser.astype("int32")) self.assert_eq(psser.astype("int64"), pser.astype("int64")) self.assert_eq(psser.astype("b"), pser.astype("b")) self.assert_eq(psser.astype("byte"), pser.astype("byte")) self.assert_eq(psser.astype("i"), pser.astype("i")) self.assert_eq(psser.astype("long"), pser.astype("long")) self.assert_eq(psser.astype("short"), pser.astype("short")) self.assert_eq(psser.astype(np.float), pser.astype(np.float)) self.assert_eq(psser.astype(np.float32), pser.astype(np.float32)) self.assert_eq(psser.astype(np.float64), pser.astype(np.float64)) self.assert_eq(psser.astype("float"), pser.astype("float")) self.assert_eq(psser.astype("float32"), pser.astype("float32")) self.assert_eq(psser.astype("float64"), pser.astype("float64")) self.assert_eq(psser.astype("double"), pser.astype("double")) self.assert_eq(psser.astype("f"), pser.astype("f")) self.assert_eq(psser.astype(bool), pser.astype(bool)) self.assert_eq(psser.astype("bool"), pser.astype("bool")) self.assert_eq(psser.astype("?"), pser.astype("?")) self.assert_eq(psser.astype(np.unicode_), pser.astype(np.unicode_)) self.assert_eq(psser.astype("str"), pser.astype("str")) self.assert_eq(psser.astype("U"), 
pser.astype("U")) if extension_dtypes_available: from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype self._check_extension(psser.astype("Int8"), pser.astype("Int8")) self._check_extension(psser.astype("Int16"), pser.astype("Int16")) self._check_extension(psser.astype("Int32"), pser.astype("Int32")) self._check_extension(psser.astype("Int64"), pser.astype("Int64")) self._check_extension(psser.astype(Int8Dtype()), pser.astype(Int8Dtype())) self._check_extension(psser.astype(Int16Dtype()), pser.astype(Int16Dtype())) self._check_extension(psser.astype(Int32Dtype()), pser.astype(Int32Dtype())) self._check_extension(psser.astype(Int64Dtype()), pser.astype(Int64Dtype())) if extension_object_dtypes_available: from pandas import StringDtype if LooseVersion(pd.__version__) >= LooseVersion("1.1"): self._check_extension(psser.astype("string"), pser.astype("string")) self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype())) else: self._check_extension( psser.astype("string"), pd.Series(["10", "20", "15", "30", "45"], name="x", dtype="string"), ) self._check_extension( psser.astype(StringDtype()), pd.Series(["10", "20", "15", "30", "45"], name="x", dtype=StringDtype()), ) if extension_float_dtypes_available: from pandas import Float32Dtype, Float64Dtype self._check_extension(psser.astype("Float32"), pser.astype("Float32")) self._check_extension(psser.astype("Float64"), pser.astype("Float64")) self._check_extension(psser.astype(Float32Dtype()), pser.astype(Float32Dtype())) self._check_extension(psser.astype(Float64Dtype()), pser.astype(Float64Dtype())) def test_astype(self): psers = [pd.Series([10, 20, 15, 30, 45], name="x")] if extension_dtypes_available: psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Int64")) if extension_float_dtypes_available: psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Float64")) for pser in psers: self._test_numeric_astype(pser) pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x") 
psser = ps.Series(pser) self.assert_eq(psser.astype(bool), pser.astype(bool)) self.assert_eq(psser.astype(str), pser.astype(str)) pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x") psser = ps.Series(pser) self.assert_eq(psser.astype(bool), pser.astype(bool)) if LooseVersion("1.1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.1.4"): # a pandas bug: https://github.com/databricks/koalas/pull/1818#issuecomment-703961980 self.assert_eq(psser.astype(str).tolist(), ["hi", "hi ", " ", " \t", "", "None"]) else: self.assert_eq(psser.astype(str), pser.astype(str)) self.assert_eq(psser.str.strip().astype(bool), pser.str.strip().astype(bool)) if extension_object_dtypes_available: from pandas import StringDtype self._check_extension(psser.astype("string"), pser.astype("string")) self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype())) pser = pd.Series([True, False, None], name="x") psser = ps.Series(pser) self.assert_eq(psser.astype(bool), pser.astype(bool)) self.assert_eq(psser.astype(str), pser.astype(str)) if extension_object_dtypes_available: from pandas import BooleanDtype, StringDtype self._check_extension(psser.astype("boolean"), pser.astype("boolean")) self._check_extension(psser.astype(BooleanDtype()), pser.astype(BooleanDtype())) if LooseVersion(pd.__version__) >= LooseVersion("1.1"): self._check_extension(psser.astype("string"), pser.astype("string")) self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype())) else: self._check_extension( psser.astype("string"), pd.Series(["True", "False", None], name="x", dtype="string"), ) self._check_extension( psser.astype(StringDtype()), pd.Series(["True", "False", None], name="x", dtype=StringDtype()), ) pser = pd.Series(["2020-10-27 00:00:01", None], name="x") psser = ps.Series(pser) self.assert_eq(psser.astype(np.datetime64), pser.astype(np.datetime64)) self.assert_eq(psser.astype("datetime64[ns]"), pser.astype("datetime64[ns]")) self.assert_eq(psser.astype("M"), 
pser.astype("M")) self.assert_eq(psser.astype("M").astype(str), pser.astype("M").astype(str)) # Comment out the below test cause because pandas returns `NaT` or `nan` randomly # self.assert_eq( # psser.astype("M").dt.date.astype(str), pser.astype("M").dt.date.astype(str) # ) if extension_object_dtypes_available: from pandas import StringDtype # The behavior of casting datetime to nullable string is changed from pandas 1.3. if LooseVersion(pd.__version__) >= LooseVersion("1.3"): self._check_extension( psser.astype("M").astype("string"), pser.astype("M").astype("string") ) self._check_extension( psser.astype("M").astype(StringDtype()), pser.astype("M").astype(StringDtype()) ) else: expected = ps.Series(["2020-10-27 00:00:01", None], name="x", dtype="string") self._check_extension(psser.astype("M").astype("string"), expected) self._check_extension(psser.astype("M").astype(StringDtype()), expected) with self.assertRaisesRegex(TypeError, "not understood"): psser.astype("int63") def test_aggregate(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") psser = ps.Series(pser) msg = "func must be a string or list of strings" with self.assertRaisesRegex(TypeError, msg): psser.aggregate({"x": ["min", "max"]}) msg = ( "If the given function is a list, it " "should only contains function names as strings." 
        # NOTE(review): this chunk's newlines were stripped by extraction; line
        # breaks and indentation restored below, tokens unchanged.
        # (continuation: tail of test_aggregate — closes the msg string and
        # asserts that mixing str and callable aggregations raises)
        )
        with self.assertRaisesRegex(ValueError, msg):
            psser.aggregate(["min", max])

    def test_drop(self):
        # Series.drop parity with pandas: scalar/list labels, MultiIndex
        # labels/levels, and the documented error cases.
        pser = pd.Series([10, 20, 15, 30, 45], name="x")
        psser = ps.Series(pser)
        self.assert_eq(psser.drop(1), pser.drop(1))
        self.assert_eq(psser.drop([1, 4]), pser.drop([1, 4]))

        msg = "Need to specify at least one of 'labels' or 'index'"
        with self.assertRaisesRegex(ValueError, msg):
            psser.drop()

        # A tuple label on a flat index is not resolvable.
        self.assertRaises(KeyError, lambda: psser.drop((0, 1)))

        # For MultiIndex
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.drop("lama"), pser.drop("lama"))
        self.assert_eq(psser.drop(labels="weight", level=1), pser.drop(labels="weight", level=1))
        self.assert_eq(psser.drop(("lama", "weight")), pser.drop(("lama", "weight")))
        self.assert_eq(
            psser.drop([("lama", "speed"), ("falcon", "weight")]),
            pser.drop([("lama", "speed"), ("falcon", "weight")]),
        )
        self.assert_eq(psser.drop({"lama": "speed"}), pser.drop({"lama": "speed"}))

        msg = "'level' should be less than the number of indexes"
        with self.assertRaisesRegex(ValueError, msg):
            psser.drop(labels="weight", level=2)

        msg = (
            "If the given index is a list, it "
            "should only contains names as all tuples or all non tuples "
            "that contain index names"
        )
        with self.assertRaisesRegex(ValueError, msg):
            psser.drop(["lama", ["cow", "falcon"]])

        msg = "Cannot specify both 'labels' and 'index'"
        with self.assertRaisesRegex(ValueError, msg):
            psser.drop("lama", index="cow")

        msg = r"'Key length \(2\) exceeds index depth \(3\)'"
        with self.assertRaisesRegex(KeyError, msg):
            psser.drop(("lama", "speed", "x"))

    def test_pop(self):
        # Series.pop parity, including that the source Series/DataFrame are
        # mutated consistently with pandas, plus categorical-dtype behavior
        # that differs across pandas versions.
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pdf = pd.DataFrame({"x": [45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3]}, index=midx)
        psdf = ps.from_pandas(pdf)
        pser = pdf.x
        psser = psdf.x

        self.assert_eq(psser.pop(("lama", "speed")), pser.pop(("lama", "speed")))
        self.assert_eq(psser, pser)
        self.assert_eq(psdf, pdf)

        msg = r"'Key length \(3\) exceeds index depth \(2\)'"
        with self.assertRaisesRegex(KeyError, msg):
            psser.pop(("lama", "speed", "x"))

        msg = "'key' should be string or tuple that contains strings"
        with self.assertRaisesRegex(TypeError, msg):
            psser.pop(["lama", "speed"])

        pser = pd.Series(["a", "b", "c", "a"], dtype="category")
        psser = ps.from_pandas(pser)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3.0"):
            self.assert_eq(psser.pop(0), pser.pop(0))
            self.assert_eq(psser, pser)

            self.assert_eq(psser.pop(3), pser.pop(3))
            self.assert_eq(psser, pser)
        else:
            # Before pandas 1.3.0, `pop` modifies the dtype of categorical series wrongly.
            self.assert_eq(psser.pop(0), "a")
            self.assert_eq(
                psser,
                pd.Series(
                    pd.Categorical(["b", "c", "a"], categories=["a", "b", "c"]), index=[1, 2, 3]
                ),
            )
            self.assert_eq(psser.pop(3), "a")
            self.assert_eq(
                psser,
                pd.Series(pd.Categorical(["b", "c"], categories=["a", "b", "c"]), index=[1, 2]),
            )

    def test_replace(self):
        # Series.replace parity for scalar/list/tuple/dict arguments and the
        # unsupported-argument error paths.
        pser = pd.Series([10, 20, 15, 30, np.nan], name="x")
        psser = ps.Series(pser)

        self.assert_eq(psser.replace(), pser.replace())
        self.assert_eq(psser.replace({}), pser.replace({}))
        self.assert_eq(psser.replace(np.nan, 45), pser.replace(np.nan, 45))
        self.assert_eq(psser.replace([10, 15], 45), pser.replace([10, 15], 45))
        self.assert_eq(psser.replace((10, 15), 45), pser.replace((10, 15), 45))
        self.assert_eq(psser.replace([10, 15], [45, 50]), pser.replace([10, 15], [45, 50]))
        self.assert_eq(psser.replace((10, 15), (45, 50)), pser.replace((10, 15), (45, 50)))

        msg = "'to_replace' should be one of str, list, tuple, dict, int, float"
        with self.assertRaisesRegex(TypeError, msg):
            psser.replace(ps.range(5))
        msg = "Replacement lists must match in length. Expecting 3 got 2"
        with self.assertRaisesRegex(ValueError, msg):
            psser.replace([10, 20, 30], [1, 2])
        msg = "replace currently not support for regex"
        with self.assertRaisesRegex(NotImplementedError, msg):
            psser.replace(r"^1.$", regex=True)

    def test_xs(self):
        # Series.xs with a full MultiIndex key.
        midx = pd.MultiIndex(
            [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed")))

    def test_duplicates(self):
        # drop_duplicates parity for every `keep` mode, over text and
        # numeric fixtures (subTest per combination).
        psers = {
            "test on texts": pd.Series(
                ["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal"
            ),
            "test on numbers": pd.Series([1, 1, 2, 4, 3]),
        }
        keeps = ["first", "last", False]

        for (msg, pser), keep in product(psers.items(), keeps):
            with self.subTest(msg, keep=keep):
                psser = ps.Series(pser)

                self.assert_eq(
                    pser.drop_duplicates(keep=keep).sort_values(),
                    psser.drop_duplicates(keep=keep).sort_values(),
                )

    def test_update(self):
        # Series.update parity; _get_data rebuilds fresh frames so each
        # update scenario starts from unmutated data.
        pser = pd.Series([10, 20, 15, 30, 45], name="x")
        psser = ps.Series(pser)

        msg = "'other' must be a Series"
        with self.assertRaisesRegex(TypeError, msg):
            psser.update(10)

        def _get_data():
            # Fresh (pandas, pandas-on-Spark) frame pair for one scenario.
            pdf = pd.DataFrame(
                {
                    "a": [None, 2, 3, 4, 5, 6, 7, 8, None],
                    "b": [None, 5, None, 3, 2, 1, None, 0, 0],
                    "c": [1, 5, 1, 3, 2, 1, 1, 0, 0],
                },
            )
            psdf = ps.from_pandas(pdf)
            return pdf, psdf

        pdf, psdf = _get_data()
        psdf.a.update(psdf.a)
        pdf.a.update(pdf.a)
        self.assert_eq(psdf, pdf)

        pdf, psdf = _get_data()
        psdf.a.update(psdf.b)
        pdf.a.update(pdf.b)
        self.assert_eq(psdf, pdf)

        pdf, psdf = _get_data()
        pser = pdf.a
        psser = psdf.a
        pser.update(pdf.b)
        psser.update(psdf.b)
        self.assert_eq(psser, pser)
        self.assert_eq(psdf, pdf)

    def test_where(self):
        pser1 = pd.Series([0, 1, 2, 3, 4])
        psser1 = ps.from_pandas(pser1)

        self.assert_eq(pser1.where(pser1 > 3), psser1.where(psser1 > 3).sort_index())

    def test_mask(self):
        # (head only — the RHS of this assignment continues on the next
        # extraction line)
        pser1 =
        # NOTE(review): newlines restored, tokens unchanged. The first
        # expression below is the continuation of `pser1 = ...` from
        # test_mask, split by the extraction.
        pd.Series([0, 1, 2, 3, 4])
        psser1 = ps.from_pandas(pser1)

        self.assert_eq(pser1.mask(pser1 > 3), psser1.mask(psser1 > 3).sort_index())

    def test_truncate(self):
        # Series.truncate parity on ascending and descending indexes,
        # `copy=False`, and the sorted-index/ordering error cases.
        pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
        psser1 = ps.Series(pser1)
        pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1])
        psser2 = ps.Series(pser2)

        self.assert_eq(psser1.truncate(), pser1.truncate())
        self.assert_eq(psser1.truncate(before=2), pser1.truncate(before=2))
        self.assert_eq(psser1.truncate(after=5), pser1.truncate(after=5))
        self.assert_eq(psser1.truncate(copy=False), pser1.truncate(copy=False))
        self.assert_eq(psser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False))
        # The bug for these tests has been fixed in pandas 1.1.0.
        if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
            self.assert_eq(psser2.truncate(4, 6), pser2.truncate(4, 6))
            self.assert_eq(psser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False))
        else:
            # Older pandas mishandles truncate on a decreasing index, so
            # compare against a hand-built expectation instead.
            expected_psser = ps.Series([20, 30, 40], index=[6, 5, 4])
            self.assert_eq(psser2.truncate(4, 6), expected_psser)
            self.assert_eq(psser2.truncate(4, 6, copy=False), expected_psser)

        psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1])
        msg = "truncate requires a sorted index"
        with self.assertRaisesRegex(ValueError, msg):
            psser.truncate()

        psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
        msg = "Truncate: 2 must be after 5"
        with self.assertRaisesRegex(ValueError, msg):
            psser.truncate(5, 2)

    def test_getitem(self):
        # __getitem__ parity: label lookup (with duplicates), boolean
        # masking, and partial MultiIndex keys.
        pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"])
        psser = ps.Series(pser)
        self.assert_eq(psser["A"], pser["A"])
        self.assert_eq(psser["B"], pser["B"])
        self.assert_eq(psser[psser > 15], pser[pser > 15])

        # for MultiIndex
        midx = pd.MultiIndex(
            [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx)
        psser = ps.Series(pser)
        self.assert_eq(psser["a"], pser["a"])
        self.assert_eq(psser["a", "lama"], pser["a", "lama"])
        self.assert_eq(psser[psser > 1.5], pser[pser > 1.5])

        msg = r"'Key length \(4\) exceeds index depth \(3\)'"
        with self.assertRaisesRegex(KeyError, msg):
            psser[("a", "lama", "speed", "x")]

    def test_keys(self):
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.keys(), pser.keys())

    def test_index(self):
        # to check setting name of Index properly.
        idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9])
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx)
        psser = ps.from_pandas(pser)

        psser.name = "koalas"
        pser.name = "koalas"
        self.assert_eq(psser.index.name, pser.index.name)

        # for check setting names of MultiIndex properly.
        # NOTE(review): this assigns `.names` on the Series objects but the
        # assertion reads `.index.names` — possibly `.index.names` was
        # intended here; confirm against the upstream test suite.
        psser.names = ["hello", "koalas"]
        pser.names = ["hello", "koalas"]
        self.assert_eq(psser.index.names, pser.index.names)

    def test_pct_change(self):
        # pct_change parity, including extreme positive/negative periods
        # (check_exact=False / almost=True tolerate float noise).
        pser = pd.Series([90, 91, 85], index=[2, 4, 1])
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
        self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
        self.assert_eq(psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
        self.assert_eq(psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
        self.assert_eq(psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
        self.assert_eq(psser.pct_change(periods=100000000), pser.pct_change(periods=100000000))

        # for MultiIndex
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
        self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
        self.assert_eq(psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
        self.assert_eq(psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
        self.assert_eq(psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
        self.assert_eq(psser.pct_change(periods=100000000), pser.pct_change(periods=100000000))

    def test_axes(self):
        pser = pd.Series([90, 91, 85], index=[2, 4, 1])
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.axes, pser.axes)

        # for MultiIndex
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.axes, pser.axes)

    def test_udt(self):
        # (head only — the dict literal continues on the next extraction line)
        sparse_values = {0: 0.1, 1:
        # NOTE(review): newlines restored, tokens unchanged. `1.1}` below
        # closes the `sparse_values` dict literal begun on the previous
        # extraction line (test_udt).
        1.1}
        sparse_vector = SparseVector(len(sparse_values), sparse_values)
        pser = pd.Series([sparse_vector])
        psser = ps.from_pandas(pser)
        self.assert_eq(psser, pser)

    def test_repeat(self):
        # Series.repeat parity with scalar and Series repeat counts, plus
        # invalid-argument errors.
        pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3))
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.repeat(3).sort_index(), pser.repeat(3).sort_index())
        self.assert_eq(psser.repeat(0).sort_index(), pser.repeat(0).sort_index())

        self.assertRaises(ValueError, lambda: psser.repeat(-1))
        self.assertRaises(TypeError, lambda: psser.repeat("abc"))

        pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3))
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.a.repeat(psdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index())

    def test_take(self):
        # Series.take parity for positive/negative positions and ranges.
        pser = pd.Series([100, 200, 300, 400, 500], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values())
        self.assert_eq(
            psser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values()
        )
        self.assert_eq(psser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values())
        self.assert_eq(
            psser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values()
        )

        # Checking the type of indices.
        self.assertRaises(TypeError, lambda: psser.take(1))
        self.assertRaises(TypeError, lambda: psser.take("1"))
        self.assertRaises(TypeError, lambda: psser.take({1, 2}))
        self.assertRaises(TypeError, lambda: psser.take({1: None, 2: None}))

    def test_divmod(self):
        # divmod parity; pre-1.0 pandas lacks Series.divmod, so compare
        # against floordiv/mod there.
        pser = pd.Series([100, None, 300, None, 500], name="Koalas")
        psser = ps.from_pandas(pser)

        if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
            kdiv, kmod = psser.divmod(-100)
            pdiv, pmod = pser.divmod(-100)
            self.assert_eq(kdiv, pdiv)
            self.assert_eq(kmod, pmod)

            kdiv, kmod = psser.divmod(100)
            pdiv, pmod = pser.divmod(100)
            self.assert_eq(kdiv, pdiv)
            self.assert_eq(kmod, pmod)
        elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
            kdiv, kmod = psser.divmod(-100)
            pdiv, pmod = pser.floordiv(-100), pser.mod(-100)
            self.assert_eq(kdiv, pdiv)
            self.assert_eq(kmod, pmod)

            kdiv, kmod = psser.divmod(100)
            pdiv, pmod = pser.floordiv(100), pser.mod(100)
            self.assert_eq(kdiv, pdiv)
            self.assert_eq(kmod, pmod)

    def test_rdivmod(self):
        # rdivmod parity; same version split as test_divmod.
        pser = pd.Series([100, None, 300, None, 500])
        psser = ps.from_pandas(pser)

        if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
            krdiv, krmod = psser.rdivmod(-100)
            prdiv, prmod = pser.rdivmod(-100)
            self.assert_eq(krdiv, prdiv)
            self.assert_eq(krmod, prmod)

            krdiv, krmod = psser.rdivmod(100)
            prdiv, prmod = pser.rdivmod(100)
            self.assert_eq(krdiv, prdiv)
            self.assert_eq(krmod, prmod)
        elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
            krdiv, krmod = psser.rdivmod(-100)
            prdiv, prmod = pser.rfloordiv(-100), pser.rmod(-100)
            self.assert_eq(krdiv, prdiv)
            self.assert_eq(krmod, prmod)

            krdiv, krmod = psser.rdivmod(100)
            prdiv, prmod = pser.rfloordiv(100), pser.rmod(100)
            self.assert_eq(krdiv, prdiv)
            self.assert_eq(krmod, prmod)

    def test_mod(self):
        # Series.mod parity against scalars (incl. 0) and another Series.
        pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.mod(-150), pser.mod(-150))
        self.assert_eq(psser.mod(0), pser.mod(0))
        self.assert_eq(psser.mod(150), pser.mod(150))

        pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.a.mod(psdf.b), pdf.a.mod(pdf.b))

    def test_mode(self):
        pser = pd.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.mode(), pser.mode())
        if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
            # The `dropna` argument is added in pandas 0.24.
            self.assert_eq(
                psser.mode(dropna=False).sort_values().reset_index(drop=True),
                pser.mode(dropna=False).sort_values().reset_index(drop=True),
            )

        pser.name = "x"
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.mode(), pser.mode())
        if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
            # The `dropna` argument is added in pandas 0.24.
            self.assert_eq(
                psser.mode(dropna=False).sort_values().reset_index(drop=True),
                pser.mode(dropna=False).sort_values().reset_index(drop=True),
            )

    def test_rmod(self):
        pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.rmod(-150), pser.rmod(-150))
        self.assert_eq(psser.rmod(0), pser.rmod(0))
        self.assert_eq(psser.rmod(150), pser.rmod(150))

        pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.a.rmod(psdf.b), pdf.a.rmod(pdf.b))

    def test_asof(self):
        # Series.asof parity for scalar and list `where`, out-of-range
        # values, and invalid `where` types.
        pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.asof(20), pser.asof(20))
        self.assert_eq(psser.asof([5, 20]).sort_index(), pser.asof([5, 20]).sort_index())
        self.assert_eq(psser.asof(100), pser.asof(100))
        self.assert_eq(repr(psser.asof(-100)), repr(pser.asof(-100)))
        self.assert_eq(psser.asof([-100, 100]).sort_index(), pser.asof([-100, 100]).sort_index())

        # where cannot be an Index, Series or a DataFrame
        self.assertRaises(ValueError, lambda: psser.asof(ps.Index([-100, 100])))
        self.assertRaises(ValueError, lambda: psser.asof(ps.Series([-100, 100])))
        # (continues on the next extraction line)
        self.assertRaises(ValueError, lambda:
        # NOTE(review): newlines restored, tokens unchanged. The first line
        # completes the `assertRaises(ValueError, lambda: ...)` begun on the
        # previous extraction line (test_asof).
        psser.asof(ps.DataFrame({"A": [1, 2, 3]})))

        # asof is not supported for a MultiIndex
        pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")])
        psser = ps.from_pandas(pser)
        self.assertRaises(ValueError, lambda: psser.asof(20))

        # asof requires a sorted index (More precisely, should be a monotonic increasing)
        psser = ps.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas")
        self.assertRaises(ValueError, lambda: psser.asof(20))
        psser = ps.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas")
        self.assertRaises(ValueError, lambda: psser.asof(20))

        pidx = pd.DatetimeIndex(["2013-12-31", "2014-01-02", "2014-01-03"])
        pser = pd.Series([1, 2, np.nan], index=pidx)
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.asof("2014-01-01"), pser.asof("2014-01-01"))
        self.assert_eq(psser.asof("2014-01-02"), pser.asof("2014-01-02"))
        self.assert_eq(repr(psser.asof("1999-01-02")), repr(pser.asof("1999-01-02")))

    def test_squeeze(self):
        # Series.squeeze parity for single/multiple values, flat and
        # MultiIndex.
        # Single value
        pser = pd.Series([90])
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.squeeze(), pser.squeeze())

        # Single value with MultiIndex
        midx = pd.MultiIndex.from_tuples([("a", "b", "c")])
        pser = pd.Series([90], index=midx)
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.squeeze(), pser.squeeze())

        # Multiple values
        pser = pd.Series([90, 91, 85])
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.squeeze(), pser.squeeze())

        # Multiple values with MultiIndex
        midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
        pser = pd.Series([90, 91, 85], index=midx)
        psser = ps.from_pandas(pser)
        self.assert_eq(psser.squeeze(), pser.squeeze())

    def test_swaplevel(self):
        # Series.swaplevel parity by position and by level name, plus the
        # documented error conditions.
        # MultiIndex with two levels
        arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
        pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
        pser = pd.Series(["a", "b", "c", "d"], index=pidx)
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.swaplevel(), psser.swaplevel())
        self.assert_eq(pser.swaplevel(0, 1), psser.swaplevel(0, 1))
        self.assert_eq(pser.swaplevel(1, 1), psser.swaplevel(1, 1))
        self.assert_eq(pser.swaplevel("number", "color"), psser.swaplevel("number", "color"))

        # MultiIndex with more than two levels
        arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
        pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
        pser = pd.Series(["a", "b", "c", "d"], index=pidx)
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.swaplevel(), psser.swaplevel())
        self.assert_eq(pser.swaplevel(0, 1), psser.swaplevel(0, 1))
        self.assert_eq(pser.swaplevel(0, 2), psser.swaplevel(0, 2))
        self.assert_eq(pser.swaplevel(1, 2), psser.swaplevel(1, 2))
        self.assert_eq(pser.swaplevel(1, 1), psser.swaplevel(1, 1))
        self.assert_eq(pser.swaplevel(-1, -2), psser.swaplevel(-1, -2))
        self.assert_eq(pser.swaplevel("number", "color"), psser.swaplevel("number", "color"))
        self.assert_eq(pser.swaplevel("number", "size"), psser.swaplevel("number", "size"))
        self.assert_eq(pser.swaplevel("color", "size"), psser.swaplevel("color", "size"))

        # Error conditions
        self.assertRaises(AssertionError, lambda: ps.Series([1, 2]).swaplevel())
        self.assertRaises(IndexError, lambda: psser.swaplevel(0, 9))
        self.assertRaises(KeyError, lambda: psser.swaplevel("not_number", "color"))
        self.assertRaises(AssertionError, lambda: psser.swaplevel(copy=False))

    def test_swapaxes(self):
        pser = pd.Series([1, 2, 3], index=["x", "y", "z"], name="ser")
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.swapaxes(0, 0), pser.swapaxes(0, 0))
        self.assert_eq(psser.swapaxes("index", "index"), pser.swapaxes("index", "index"))
        self.assert_eq((psser + 1).swapaxes(0, 0), (pser + 1).swapaxes(0, 0))

        self.assertRaises(AssertionError, lambda: psser.swapaxes(0, 1, copy=False))
        self.assertRaises(ValueError, lambda: psser.swapaxes(0, 1))
        self.assertRaises(ValueError, lambda: psser.swapaxes("index", "columns"))

    def test_div_zero_and_nan(self):
        # Division parity for 0 and NaN divisors; floordiv-by-zero changed
        # in pandas 1.0.0, hence the version split.
        pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.div(0), psser.div(0))
        self.assert_eq(pser.truediv(0), psser.truediv(0))
        self.assert_eq(pser / 0, psser / 0)
        self.assert_eq(pser.div(np.nan), psser.div(np.nan))
        self.assert_eq(pser.truediv(np.nan), psser.truediv(np.nan))
        self.assert_eq(pser / np.nan, psser / np.nan)

        # floordiv has different behavior in pandas > 1.0.0 when divide by 0
        if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
            self.assert_eq(pser.floordiv(0), psser.floordiv(0))
            self.assert_eq(pser // 0, psser // 0)
        else:
            result = pd.Series(
                [np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas"
            )
            self.assert_eq(psser.floordiv(0), result)
            self.assert_eq(psser // 0, result)
        self.assert_eq(pser.floordiv(np.nan), psser.floordiv(np.nan))

    def test_mad(self):
        # Series.mad parity with/without NaNs, flat and MultiIndex.
        pser = pd.Series([1, 2, 3, 4], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.mad(), psser.mad())

        pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.mad(), psser.mad())

        pmidx = pd.MultiIndex.from_tuples(
            [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
        )
        pser = pd.Series([1, 2, 3, 4, 5], name="Koalas")
        pser.index = pmidx
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.mad(), psser.mad())

        pmidx = pd.MultiIndex.from_tuples(
            [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
        )
        pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas")
        pser.index = pmidx
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.mad(), psser.mad())

    def test_to_frame(self):
        pser = pd.Series(["a", "b", "c"])
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.to_frame(name="a"), psser.to_frame(name="a"))

        # for MultiIndex
        midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
        pser = pd.Series(["a", "b", "c"], index=midx)
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.to_frame(name="a"), psser.to_frame(name="a"))

    def test_shape(self):
        pser = pd.Series(["a", "b", "c"])
        # (head only — the RHS of this assignment continues on the next
        # extraction line)
        psser =
        # NOTE(review): newlines restored, tokens unchanged. The first
        # expression completes `psser = ...` from test_shape, split by the
        # extraction.
        ps.from_pandas(pser)
        self.assert_eq(pser.shape, psser.shape)

        # for MultiIndex
        midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
        pser = pd.Series(["a", "b", "c"], index=midx)
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.shape, psser.shape)

    @unittest.skipIf(not have_tabulate, tabulate_requirement_message)
    def test_to_markdown(self):
        pser = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
        psser = ps.from_pandas(pser)

        # `to_markdown()` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
        if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
            self.assertRaises(NotImplementedError, lambda: psser.to_markdown())
        else:
            self.assert_eq(pser.to_markdown(), psser.to_markdown())

    def test_unstack(self):
        # Series.unstack parity over all valid levels (positive/negative),
        # numeric and non-numeric data, plus level-range/type errors.
        pser = pd.Series(
            [10, -2, 4, 7],
            index=pd.MultiIndex.from_tuples(
                [("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")],
                names=["A", "B", "C"],
            ),
        )
        psser = ps.from_pandas(pser)

        levels = [-3, -2, -1, 0, 1, 2]
        for level in levels:
            pandas_result = pser.unstack(level=level)
            pandas_on_spark_result = psser.unstack(level=level).sort_index()
            self.assert_eq(pandas_result, pandas_on_spark_result)
            self.assert_eq(pandas_result.index.names, pandas_on_spark_result.index.names)
            self.assert_eq(pandas_result.columns.names, pandas_on_spark_result.columns.names)

        # non-numeric datatypes
        pser = pd.Series(
            list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]])
        )
        psser = ps.from_pandas(pser)

        levels = [-2, -1, 0, 1]
        for level in levels:
            pandas_result = pser.unstack(level=level)
            pandas_on_spark_result = psser.unstack(level=level).sort_index()
            self.assert_eq(pandas_result, pandas_on_spark_result)
            self.assert_eq(pandas_result.index.names, pandas_on_spark_result.index.names)
            self.assert_eq(pandas_result.columns.names, pandas_on_spark_result.columns.names)

        # Exceeding the range of level
        self.assertRaises(IndexError, lambda: psser.unstack(level=3))
        self.assertRaises(IndexError, lambda: psser.unstack(level=-4))

        # Only support for MultiIndex
        psser = ps.Series([10, -2, 4, 7])
        self.assertRaises(ValueError, lambda: psser.unstack())

    def test_item(self):
        # item() must fail on a Series of length != 1.
        psser = ps.Series([10, 20])
        self.assertRaises(ValueError, lambda: psser.item())

    def test_filter(self):
        # Series.filter parity for items/regex/like, flat and MultiIndex,
        # plus invalid axis/items errors.
        pser = pd.Series([0, 1, 2], index=["one", "two", "three"])
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.filter(items=["one", "three"]), psser.filter(items=["one", "three"]))
        self.assert_eq(pser.filter(regex="e$"), psser.filter(regex="e$"))
        self.assert_eq(pser.filter(like="hre"), psser.filter(like="hre"))

        with self.assertRaisesRegex(ValueError, "Series does not support columns axis."):
            psser.filter(like="hre", axis=1)

        # for MultiIndex
        midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")])
        pser = pd.Series([0, 1, 2], index=midx)
        psser = ps.from_pandas(pser)

        self.assert_eq(
            pser.filter(items=[("one", "x"), ("three", "z")]),
            psser.filter(items=[("one", "x"), ("three", "z")]),
        )

        with self.assertRaisesRegex(TypeError, "Unsupported type list"):
            psser.filter(items=[["one", "x"], ("three", "z")])

        with self.assertRaisesRegex(ValueError, "The item should not be empty."):
            psser.filter(items=[(), ("three", "z")])

    def test_abs(self):
        pser = pd.Series([-2, -1, 0, 1])
        psser = ps.from_pandas(pser)

        self.assert_eq(abs(psser), abs(pser))
        self.assert_eq(np.abs(psser), np.abs(pser))

    def test_bfill(self):
        # Backward-fill parity, including in-place mutation and that the
        # parent DataFrame reflects the change.
        pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
        psdf = ps.from_pandas(pdf)

        pser = pdf.x
        psser = psdf.x

        self.assert_eq(psser.bfill(), pser.bfill())
        self.assert_eq(psser.bfill()[0], pser.bfill()[0])

        psser.bfill(inplace=True)
        pser.bfill(inplace=True)
        self.assert_eq(psser, pser)
        self.assert_eq(psser[0], pser[0])
        self.assert_eq(psdf, pdf)

    def test_ffill(self):
        # Forward-fill parity, mirror of test_bfill.
        pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
        psdf = ps.from_pandas(pdf)

        pser = pdf.x
        psser = psdf.x

        self.assert_eq(psser.ffill(), pser.ffill())
        self.assert_eq(psser.ffill()[4], pser.ffill()[4])

        psser.ffill(inplace=True)
        pser.ffill(inplace=True)
        self.assert_eq(psser, pser)
        self.assert_eq(psser[4], pser[4])
        self.assert_eq(psdf, pdf)

    def test_iteritems(self):
        pser = pd.Series(["A", "B", "C"])
        psser = ps.from_pandas(pser)

        for (p_name, p_items), (k_name, k_items) in zip(pser.iteritems(), psser.iteritems()):
            self.assert_eq(p_name, k_name)
            self.assert_eq(p_items, k_items)

    def test_droplevel(self):
        # Series.droplevel parity for positional/named/iterable level
        # specs and the KeyError/IndexError cases.
        # droplevel is new in pandas 0.24.0
        if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
            pser = pd.Series(
                [1, 2, 3],
                index=pd.MultiIndex.from_tuples(
                    [("x", "a", "q"), ("x", "b", "w"), ("y", "c", "e")],
                    names=["level_1", "level_2", "level_3"],
                ),
            )
            psser = ps.from_pandas(pser)

            self.assert_eq(pser.droplevel(0), psser.droplevel(0))
            self.assert_eq(pser.droplevel("level_1"), psser.droplevel("level_1"))
            self.assert_eq(pser.droplevel(-1), psser.droplevel(-1))
            self.assert_eq(pser.droplevel([0]), psser.droplevel([0]))
            self.assert_eq(pser.droplevel(["level_1"]), psser.droplevel(["level_1"]))
            self.assert_eq(pser.droplevel((0,)), psser.droplevel((0,)))
            self.assert_eq(pser.droplevel(("level_1",)), psser.droplevel(("level_1",)))
            self.assert_eq(pser.droplevel([0, 2]), psser.droplevel([0, 2]))
            self.assert_eq(
                pser.droplevel(["level_1", "level_3"]), psser.droplevel(["level_1", "level_3"])
            )
            self.assert_eq(pser.droplevel((1, 2)), psser.droplevel((1, 2)))
            self.assert_eq(
                pser.droplevel(("level_2", "level_3")), psser.droplevel(("level_2", "level_3"))
            )

            with self.assertRaisesRegex(KeyError, "Level {0, 1, 2} not found"):
                psser.droplevel({0, 1, 2})
            with self.assertRaisesRegex(KeyError, "Level level_100 not found"):
                psser.droplevel(["level_1", "level_100"])
            with self.assertRaisesRegex(
                IndexError, "Too many levels: Index has only 3 levels, not 11"
            ):
                psser.droplevel(10)
            with self.assertRaisesRegex(
                IndexError,
                "Too many levels: Index has only 3 levels, -10 is not a valid level number",
            ):
                psser.droplevel(-10)
            # (the next `with self.assertRaisesRegex(...)` continues on the
            # following extraction line)
            with
            # NOTE(review): newlines restored, tokens unchanged. This
            # completes the `with` statement begun on the previous
            # extraction line (test_droplevel error cases).
            self.assertRaisesRegex(
                ValueError,
                "Cannot remove 3 levels from an index with 3 levels: "
                "at least one level must be left.",
            ):
                psser.droplevel([0, 1, 2])
            with self.assertRaisesRegex(
                ValueError,
                "Cannot remove 5 levels from an index with 3 levels: "
                "at least one level must be left.",
            ):
                psser.droplevel([1, 1, 1, 1, 1])

            # Tupled names
            pser.index.names = [("a", "1"), ("b", "2"), ("c", "3")]
            psser = ps.from_pandas(pser)

            self.assert_eq(
                pser.droplevel([("a", "1"), ("c", "3")]), psser.droplevel([("a", "1"), ("c", "3")])
            )

    def test_dot(self):
        # Series.dot parity against a Series, a DataFrame, and the
        # misaligned-index error.
        pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
        psdf = ps.from_pandas(pdf)

        self.assert_eq((psdf["b"] * 10).dot(psdf["a"]), (pdf["b"] * 10).dot(pdf["a"]))
        self.assert_eq((psdf["b"] * 10).dot(psdf), (pdf["b"] * 10).dot(pdf))
        self.assert_eq((psdf["b"] * 10).dot(psdf + 1), (pdf["b"] * 10).dot(pdf + 1))

        psdf_other = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"])
        with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
            psdf["b"].dot(psdf_other)

    def test_tail(self):
        # Series.tail parity for default/positive/negative/over-length n
        # and a non-integer argument.
        pser = pd.Series(range(1000), name="Koalas")
        psser = ps.from_pandas(pser)

        self.assert_eq(pser.tail(), psser.tail())
        self.assert_eq(pser.tail(10), psser.tail(10))
        self.assert_eq(pser.tail(-990), psser.tail(-990))
        self.assert_eq(pser.tail(0), psser.tail(0))
        self.assert_eq(pser.tail(1001), psser.tail(1001))
        self.assert_eq(pser.tail(-1001), psser.tail(-1001))
        self.assert_eq((pser + 1).tail(), (psser + 1).tail())
        self.assert_eq((pser + 1).tail(10), (psser + 1).tail(10))
        self.assert_eq((pser + 1).tail(-990), (psser + 1).tail(-990))
        self.assert_eq((pser + 1).tail(0), (psser + 1).tail(0))
        self.assert_eq((pser + 1).tail(1001), (psser + 1).tail(1001))
        self.assert_eq((pser + 1).tail(-1001), (psser + 1).tail(-1001))
        with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
            psser.tail("10")

    def test_product(self):
        # Series.prod parity: NaN handling, empty/boolean series,
        # `min_count`, and non-numeric dtype errors.
        pser = pd.Series([10, 20, 30, 40, 50])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod())

        # Containing NA values
        pser = pd.Series([10, np.nan, 30, np.nan, 50])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod(), almost=True)

        # All-NA values
        pser = pd.Series([np.nan, np.nan, np.nan])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod())

        # Empty Series
        pser = pd.Series([])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod())

        # Boolean Series
        pser = pd.Series([True, True, True])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod())

        pser = pd.Series([False, False, False])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod())

        pser = pd.Series([True, False, True])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(), psser.prod())

        # With `min_count` parameter
        pser = pd.Series([10, 20, 30, 40, 50])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(min_count=5), psser.prod(min_count=5))
        self.assert_eq(pser.prod(min_count=6), psser.prod(min_count=6))

        pser = pd.Series([10, np.nan, 30, np.nan, 50])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(min_count=3), psser.prod(min_count=3), almost=True)
        self.assert_eq(pser.prod(min_count=4), psser.prod(min_count=4))

        pser = pd.Series([np.nan, np.nan, np.nan])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))

        pser = pd.Series([])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))

        with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
            ps.Series(["a", "b", "c"]).prod()
        with self.assertRaisesRegex(
            TypeError, "Could not convert datetime64\\[ns\\] \\(timestamp.*\\) to numeric"
        ):
            ps.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).prod()

    def test_hasnans(self):
        # hasnans parity across boolean, timestamp, Decimal and empty
        # series.
        # BooleanType
        pser = pd.Series([True, False, True, True])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)

        pser = pd.Series([True, False, np.nan, True])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)

        # TimestampType
        pser = pd.Series([pd.Timestamp("2020-07-30") for _ in range(3)])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)

        pser = pd.Series([pd.Timestamp("2020-07-30"), np.nan, pd.Timestamp("2020-07-30")])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)

        # DecimalType
        pser = pd.Series([Decimal("0.1"), Decimal("NaN")])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)

        # empty
        pser = pd.Series([])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.hasnans, psser.hasnans)

    def test_last_valid_index(self):
        pser = pd.Series([250, 1.5, 320, 1, 0.3, None, None, None, None])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.last_valid_index(), psser.last_valid_index())

        # MultiIndex columns
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser.index = midx
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.last_valid_index(), psser.last_valid_index())

        # Empty Series
        pser = pd.Series([])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.last_valid_index(), psser.last_valid_index())

    def test_first_valid_index(self):
        # Empty Series
        pser = pd.Series([])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.first_valid_index(), psser.first_valid_index())

    def test_factorize(self):
        # factorize parity; pandas-on-Spark always sorts, so the pandas
        # side calls factorize(sort=True).
        pser = pd.Series(["a", "b", "a", "b"])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize(sort=True)
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pser = pd.Series([5, 1, 5, 1])
        psser = ps.from_pandas(pser)
        pcodes, puniques = (pser + 1).factorize(sort=True)
        kcodes, kuniques = (psser + 1).factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pser = pd.Series(["a", "b", "a", "b"], name="ser", index=["w", "x", "y", "z"])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize(sort=True)
        # (the tuple assignment continues on the next extraction line)
        kcodes,
        # NOTE(review): newlines restored, tokens unchanged. The first line
        # completes the `kcodes, kuniques = ...` assignment split from the
        # previous extraction line (test_factorize).
        kuniques = psser.factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pser = pd.Series(
            ["a", "b", "a", "b"], index=pd.MultiIndex.from_arrays([[4, 3, 2, 1], [1, 2, 3, 4]])
        )
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize(sort=True)
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        #
        # Deals with None and np.nan
        #
        pser = pd.Series(["a", "b", "a", np.nan])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize(sort=True)
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pser = pd.Series([1, None, 3, 2, 1])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize(sort=True)
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pser = pd.Series(["a", None, "a"])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize(sort=True)
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pser = pd.Series([None, np.nan])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize()
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes, kcodes.to_list())
        # pandas: Float64Index([], dtype='float64')
        self.assert_eq(pd.Index([]), kuniques)

        pser = pd.Series([np.nan, np.nan])
        psser = ps.from_pandas(pser)
        pcodes, puniques = pser.factorize()
        kcodes, kuniques = psser.factorize()
        self.assert_eq(pcodes, kcodes.to_list())
        # pandas: Float64Index([], dtype='float64')
        self.assert_eq(pd.Index([]), kuniques)

        #
        # Deals with na_sentinel
        #
        # pandas >= 1.1.2 support na_sentinel=None
        # pandas >= 0.24 support na_sentinel not to be -1
        #
        pd_below_1_1_2 = LooseVersion(pd.__version__) < LooseVersion("1.1.2")
        pd_below_0_24 = LooseVersion(pd.__version__) < LooseVersion("0.24")

        pser = pd.Series(["a", "b", "a", np.nan, None])
        psser = ps.from_pandas(pser)

        pcodes, puniques = pser.factorize(sort=True, na_sentinel=-2)
        kcodes, kuniques = psser.factorize(na_sentinel=-2)
        self.assert_eq([0, 1, 0, -2, -2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        pcodes, puniques = pser.factorize(sort=True, na_sentinel=2)
        kcodes, kuniques = psser.factorize(na_sentinel=2)
        self.assert_eq([0, 1, 0, 2, 2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

        if not pd_below_1_1_2:
            pcodes, puniques = pser.factorize(sort=True, na_sentinel=None)
            kcodes, kuniques = psser.factorize(na_sentinel=None)
            self.assert_eq(pcodes.tolist(), kcodes.to_list())
            # puniques is Index(['a', 'b', nan], dtype='object')
            self.assert_eq(ps.Index(["a", "b", None]), kuniques)

        psser = ps.Series([1, 2, np.nan, 4, 5])  # Arrow takes np.nan as null
        psser.loc[3] = np.nan  # Spark takes np.nan as NaN
        kcodes, kuniques = psser.factorize(na_sentinel=None)
        pcodes, puniques = psser.to_pandas().factorize(sort=True, na_sentinel=None)
        self.assert_eq(pcodes.tolist(), kcodes.to_list())
        self.assert_eq(puniques, kuniques)

    def test_pad(self):
        # Series.pad parity; before pandas 1.1 compare against a hand-built
        # expectation instead of pandas itself.
        pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
        psser = ps.from_pandas(pser)

        if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
            self.assert_eq(pser.pad(), psser.pad())

            # Test `inplace=True`
            pser.pad(inplace=True)
            psser.pad(inplace=True)
            self.assert_eq(pser, psser)
        else:
            expected = ps.Series([np.nan, 2, 3, 4, 4, 6], name="x")
            self.assert_eq(expected, psser.pad())

            # Test `inplace=True`
            psser.pad(inplace=True)
            self.assert_eq(expected, psser)

    def test_explode(self):
        # Series.explode parity (added to pandas in 0.25); on older pandas
        # compare against hand-built expectations, flat and MultiIndex.
        if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
            pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
            psser = ps.from_pandas(pser)
            self.assert_eq(pser.explode(), psser.explode(), almost=True)

            # MultiIndex
            pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
            psser = ps.from_pandas(pser)
            self.assert_eq(pser.explode(), psser.explode(), almost=True)

            # non-array type Series
            pser = pd.Series([1, 2, 3, 4])
            psser = ps.from_pandas(pser)
            self.assert_eq(pser.explode(), psser.explode())
        else:
            pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
            psser = ps.from_pandas(pser)
            expected = pd.Series([1.0, 2.0, 3.0, None, None, 3.0, 4.0], index=[0, 0, 0, 1, 2, 3, 3])
            self.assert_eq(psser.explode(), expected)

            # MultiIndex
            pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
            psser = ps.from_pandas(pser)
            expected = pd.Series(
                [1.0, 2.0, 3.0, None, None, 3.0, 4.0],
                index=pd.MultiIndex.from_tuples(
                    [
                        ("a", "w"),
                        ("a", "w"),
                        ("a", "w"),
                        ("b", "x"),
                        ("c", "y"),
                        ("d", "z"),
                        ("d", "z"),
                    ]
                ),
            )
            self.assert_eq(psser.explode(), expected)

            # non-array type Series
            pser = pd.Series([1, 2, 3, 4])
            psser = ps.from_pandas(pser)
            expected = pser
            self.assert_eq(psser.explode(), expected)

    def test_argsort(self):
        # Series.argsort parity (incl. negated values), MultiIndex, named
        # series, and a Series derived from an Index.
        # Without null values
        pser = pd.Series([0, -100, 50, 100, 20], index=["A", "B", "C", "D", "E"])
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
        self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())

        # MultiIndex
        pser.index = pd.MultiIndex.from_tuples(
            [("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
        )
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
        self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())

        # With name
        pser.name = "Koalas"
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
        self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())

        # Series from Index
        pidx = pd.Index([4.0, -6.0, 2.0, -100.0, 11.0, 20.0, 1.0, -99.0])
        psidx = ps.from_pandas(pidx)
        self.assert_eq(
            pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
        )
        self.assert_eq(
            (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
        )
# Series from Index with name pidx.name = "Koalas" psidx = ps.from_pandas(pidx) self.assert_eq( pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index() ) self.assert_eq( (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index() ) # Series from DataFrame pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]}) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index()) self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index()) # With null values pser = pd.Series([0, -100, np.nan, 100, np.nan], index=["A", "B", "C", "D", "E"]) psser = ps.from_pandas(pser) self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index()) self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index()) # MultiIndex with null values pser.index = pd.MultiIndex.from_tuples( [("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")] ) psser = ps.from_pandas(pser) self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index()) self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index()) # With name with null values pser.name = "Koalas" psser = ps.from_pandas(pser) self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index()) self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index()) # Series from Index with null values pidx = pd.Index([4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]) psidx = ps.from_pandas(pidx) self.assert_eq( pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index() ) self.assert_eq( (-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index() ) # Series from Index with name with null values pidx.name = "Koalas" psidx = ps.from_pandas(pidx) self.assert_eq( pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index() ) self.assert_eq( 
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index() ) # Series from DataFrame with null values pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]}) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index()) self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index()) def test_argmin_argmax(self): pser = pd.Series( { "Corn Flakes": 100.0, "Almond Delight": 110.0, "Cinnamon Toast Crunch": 120.0, "Cocoa Puff": 110.0, "Expensive Flakes": 120.0, "Cheap Flakes": 100.0, }, name="Koalas", ) psser = ps.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.0"): self.assert_eq(pser.argmin(), psser.argmin()) self.assert_eq(pser.argmax(), psser.argmax()) # MultiIndex pser.index = pd.MultiIndex.from_tuples( [("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")] ) psser = ps.from_pandas(pser) self.assert_eq(pser.argmin(), psser.argmin()) self.assert_eq(pser.argmax(), psser.argmax()) # Null Series self.assert_eq(pd.Series([np.nan]).argmin(), ps.Series([np.nan]).argmin()) self.assert_eq(pd.Series([np.nan]).argmax(), ps.Series([np.nan]).argmax()) else: self.assert_eq(pser.values.argmin(), psser.argmin()) self.assert_eq(pser.values.argmax(), psser.argmax()) # MultiIndex pser.index = pd.MultiIndex.from_tuples( [("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")] ) psser = ps.from_pandas(pser) self.assert_eq(pser.values.argmin(), psser.argmin()) self.assert_eq(pser.values.argmax(), psser.argmax()) # Null Series self.assert_eq(-1, ps.Series([np.nan]).argmin()) self.assert_eq(-1, ps.Series([np.nan]).argmax()) with self.assertRaisesRegex(ValueError, "attempt to get argmin of an empty sequence"): ps.Series([]).argmin() with self.assertRaisesRegex(ValueError, "attempt to get argmax of an empty sequence"): ps.Series([]).argmax() def test_backfill(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], 
name="x") psser = ps.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.1"): self.assert_eq(pser.backfill(), psser.backfill()) # Test `inplace=True` pser.backfill(inplace=True) psser.backfill(inplace=True) self.assert_eq(pser, psser) else: expected = ps.Series([2.0, 2.0, 3.0, 4.0, 6.0, 6.0], name="x") self.assert_eq(expected, psser.backfill()) # Test `inplace=True` psser.backfill(inplace=True) self.assert_eq(expected, psser) def test_align(self): pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) psdf = ps.from_pandas(pdf) for join in ["outer", "inner", "left", "right"]: for axis in [None, 0]: psser_l, psser_r = psdf.a.align(psdf.b, join=join, axis=axis) pser_l, pser_r = pdf.a.align(pdf.b, join=join, axis=axis) self.assert_eq(psser_l, pser_l) self.assert_eq(psser_r, pser_r) psser_l, psdf_r = psdf.b.align(psdf[["b", "a"]], join=join, axis=axis) pser_l, pdf_r = pdf.b.align(pdf[["b", "a"]], join=join, axis=axis) self.assert_eq(psser_l, pser_l) self.assert_eq(psdf_r, pdf_r) self.assertRaises(ValueError, lambda: psdf.a.align(psdf.b, axis=1)) def test_pow_and_rpow(self): pser = pd.Series([1, 2, np.nan]) psser = ps.from_pandas(pser) self.assert_eq(pser.pow(np.nan), psser.pow(np.nan)) self.assert_eq(pser ** np.nan, psser ** np.nan) self.assert_eq(pser.rpow(np.nan), psser.rpow(np.nan)) self.assert_eq(1 ** pser, 1 ** psser) def test_between_time(self): idx = pd.date_range("2018-04-09", periods=4, freq="1D20min") pser = pd.Series([1, 2, 3, 4], index=idx) psser = ps.from_pandas(pser) self.assert_eq( pser.between_time("0:15", "0:45").sort_index(), psser.between_time("0:15", "0:45").sort_index(), ) pser.index.name = "ts" psser = ps.from_pandas(pser) self.assert_eq( pser.between_time("0:15", "0:45").sort_index(), psser.between_time("0:15", "0:45").sort_index(), ) pser.index.name = "index" psser = ps.from_pandas(pser) self.assert_eq( pser.between_time("0:15", "0:45").sort_index(), psser.between_time("0:15", "0:45").sort_index(), ) def test_at_time(self): 
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min") pser = pd.Series([1, 2, 3, 4], index=idx) psser = ps.from_pandas(pser) self.assert_eq( pser.at_time("0:20").sort_index(), psser.at_time("0:20").sort_index(), ) pser.index.name = "ts" psser = ps.from_pandas(pser) self.assert_eq( pser.at_time("0:20").sort_index(), psser.at_time("0:20").sort_index(), ) pser.index.name = "index" psser = ps.from_pandas(pser) self.assert_eq( pser.at_time("0:20").sort_index(), psser.at_time("0:20").sort_index(), ) def test_apply(self): psser = self.psser def udf(col) -> ps.Series[int]: return col + 10 with self.assertRaisesRegex( ValueError, r"Expected the return type of this function to be of scalar type, " r"but found type SeriesType\[LongType\]", ): psser.apply(udf) def test_combine_first(self): pdf = pd.DataFrame( { "A": {"falcon": 330.0, "eagle": 160.0}, "B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0}, } ) pser1, pser2 = pdf.A, pdf.B psdf = ps.from_pandas(pdf) psser1, psser2 = psdf.A, psdf.B self.assert_eq(psser1.combine_first(psser2), pser1.combine_first(pser2)) psser1.name = pser1.name = ("X", "A") psser2.name = pser2.name = ("Y", "B") self.assert_eq(psser1.combine_first(psser2), pser1.combine_first(pser2)) def test_cov(self): pdf = pd.DataFrame( { "s1": ["a", "b", "c"], "s2": [0.12528585, 0.26962463, 0.51111198], }, index=[0, 1, 2], ) psdf = ps.from_pandas(pdf) with self.assertRaisesRegex(TypeError, "unsupported dtype: object"): psdf["s1"].cov(psdf["s2"]) pdf = pd.DataFrame( { "s1": [0.90010907, 0.13484424, 0.62036035], "s2": [0.12528585, 0.26962463, 0.51111198], }, index=[0, 1, 2], ) self._test_cov(pdf) pdf = pd.DataFrame( { "s1": [0.90010907, np.nan, 0.13484424, 0.62036035], "s2": [0.12528585, 0.81131178, 0.26962463, 0.51111198], }, index=[0, 1, 2, 3], ) self._test_cov(pdf) def _test_cov(self, pdf): psdf = ps.from_pandas(pdf) pcov = pdf["s1"].cov(pdf["s2"]) pscov = psdf["s1"].cov(psdf["s2"]) self.assert_eq(pcov, pscov, almost=True) pcov = pdf["s1"].cov(pdf["s2"], 
min_periods=3) pscov = psdf["s1"].cov(psdf["s2"], min_periods=3) self.assert_eq(pcov, pscov, almost=True) pcov = pdf["s1"].cov(pdf["s2"], min_periods=4) pscov = psdf["s1"].cov(psdf["s2"], min_periods=4) self.assert_eq(pcov, pscov, almost=True) def test_eq(self): pser = pd.Series([1, 2, 3, 4, 5, 6], name="x") psser = ps.from_pandas(pser) # other = Series self.assert_eq(pser.eq(pser), psser.eq(psser)) self.assert_eq(pser == pser, psser == psser) # other = dict other = {1: None, 2: None, 3: None, 4: None, np.nan: None, 6: None} self.assert_eq(pser.eq(other), psser.eq(other)) self.assert_eq(pser == other, psser == other) # other = set other = {1, 2, 3, 4, np.nan, 6} self.assert_eq(pser.eq(other), psser.eq(other)) self.assert_eq(pser == other, psser == other) # other = list other = [np.nan, 1, 3, 4, np.nan, 6] if LooseVersion(pd.__version__) >= LooseVersion("1.2"): self.assert_eq(pser.eq(other), psser.eq(other).sort_index()) self.assert_eq(pser == other, (psser == other).sort_index()) else: self.assert_eq(pser.eq(other).rename("x"), psser.eq(other).sort_index()) self.assert_eq((pser == other).rename("x"), (psser == other).sort_index()) # other = tuple other = (np.nan, 1, 3, 4, np.nan, 6) if LooseVersion(pd.__version__) >= LooseVersion("1.2"): self.assert_eq(pser.eq(other), psser.eq(other).sort_index()) self.assert_eq(pser == other, (psser == other).sort_index()) else: self.assert_eq(pser.eq(other).rename("x"), psser.eq(other).sort_index()) self.assert_eq((pser == other).rename("x"), (psser == other).sort_index()) # other = list with the different length other = [np.nan, 1, 3, 4, np.nan] with self.assertRaisesRegex(ValueError, "Lengths must be equal"): psser.eq(other) with self.assertRaisesRegex(ValueError, "Lengths must be equal"): psser == other # other = tuple with the different length other = (np.nan, 1, 3, 4, np.nan) with self.assertRaisesRegex(ValueError, "Lengths must be equal"): psser.eq(other) with self.assertRaisesRegex(ValueError, "Lengths must be equal"): 
psser == other if __name__ == "__main__": from pyspark.pandas.tests.test_series import * # noqa: F401 try: import xmlrunner # type: ignore[import] testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
nchammas/spark
python/pyspark/pandas/tests/test_series.py
Python
apache-2.0
128,270
[ "Elk" ]
204e1ca3a09dff986a57d66575dfd3ecb6bfad80b09d7aebc4e2f1585fb4c6e4
"""a prototype multiobjective opt under uncertainty algorithm """ import os import numpy as np import pandas as pd import pyemu from .ensemble_method import EnsembleMethod class ParetoObjFunc(object): """multiobjective function calculator.""" def __init__(self, pst, obj_function_dict, logger): self.logger = logger self.pst = pst self.max_distance = 1.0e30 obs = pst.observation_data pi = pst.prior_information self.obs_dict, self.pi_dict = {}, {} for name, direction in obj_function_dict.items(): if name in obs.obsnme: if direction.lower().startswith("max"): self.obs_dict[name] = "max" elif direction.lower().startswith("min"): self.obs_dict[name] = "min" else: self.logger.lraise( "unrecognized direction for obs obj func {0}:'{1}'".format( name, direction ) ) elif name in pi.pilbl: if direction.lower().startswith("max"): self.pi_dict[name] = "max" elif direction.lower().startswith("min"): self.pi_dict[name] = "min" else: self.logger.lraise( "unrecognized direction for pi obj func {0}:'{1}'".format( name, direction ) ) else: self.logger.lraise("objective function not found:{0}".format(name)) if len(self.pi_dict) > 0: self.logger.lraise("pi obj function not yet supported") self.logger.statement( "{0} obs objective functions registered".format(len(self.obs_dict)) ) for name, direction in self.obs_dict.items(): self.logger.statement( "obs obj function: {0}, direction: {1}".format(name, direction) ) self.logger.statement( "{0} pi objective functions registered".format(len(self.pi_dict)) ) for name, direction in self.pi_dict.items(): self.logger.statement( "pi obj function: {0}, direction: {1}".format(name, direction) ) self.is_nondominated = self.is_nondominated_continuous self.obs_obj_names = list(self.obs_dict.keys()) def is_feasible(self, obs_df, risk=0.5): """identify which candidate solutions in obs_df (rows) are feasible with respect obs constraints (obs_df) Parameters ---------- obs_df : pandas.DataFrame a dataframe with columns of obs names and rows of realizations 
risk : float risk value. If != 0.5, then risk shifting is used. Otherwise, the obsval in Pst is used. Default is 0.5. Returns ------- is_feasible : pandas.Series series with obs_df.index and bool values """ # todo deal with pi eqs is_feasible = pd.Series(data=True, index=obs_df.index) for lt_obs in self.pst.less_than_obs_constraints: if risk != 0.5: val = self.get_risk_shifted_value(risk, obs_df.loc[lt_obs]) else: val = self.pst.observation_data.loc[lt_obs, "obsval"] is_feasible.loc[obs_df.loc[:, lt_obs] >= val] = False for gt_obs in self.pst.greater_than_obs_constraints: if risk != 0.5: val = self.get_risk_shifted_value(risk, obs_df.loc[gt_obs]) else: val = self.pst.observation_data.loc[gt_obs, "obsval"] is_feasible.loc[obs_df.loc[:, gt_obs] <= val] = False return is_feasible @property def obs_obj_signs(self): signs = [] for obj in self.obs_obj_names: if self.obs_dict[obj] == "max": signs.append(1.0) else: signs.append(-1.0) signs = np.array(signs) return signs def dominates(self, sol1, sol2): d = self.obs_obj_signs * (sol1 - sol2) if np.all(d >= 0.0) and np.any(d > 0.0): return True return False def is_nondominated_pathetic(self, obs_df): """identify which candidate solutions are pareto non-dominated - super patheically slow... 
Parameters ---------- obs_df : pandas.DataFrame dataframe with columns of observation names and rows of realizations Returns ------- is_dominated : pandas.Series series with index of obs_df and bool series """ obj_df = obs_df.loc[:, self.obs_obj_names] is_nondom = [] for i, iidx in enumerate(obj_df.index): ind = True for jidx in obj_df.index: if iidx == jidx: continue # if dominates(jidx,iidx): # ind = False # break if self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]): ind = False break is_nondom.append(ind) is_nondom = pd.Series(data=is_nondom, index=obs_df.index, dtype=bool) return is_nondom def is_nondominated_continuous(self, obs_df): """identify which candidate solutions are pareto non-dominated continuously updated, but still slow Parameters ---------- obs_df : pandas.DataFrame dataframe with columns of observation names and rows of realizations Returns ------- is_dominated : pandas.Series series with index of obs_df and bool series """ obj_df = obs_df.loc[:, self.obs_obj_names] P = list(obj_df.index) PP = set() PP.add(P[0]) # iidx = 1 # while iidx < len(P): for iidx in P: jidx = 0 drop = [] keep = True for jidx in PP: # if dominates(iidx,jidx): # drop.append(jidx) # elif dominates(jidx,iidx): # keep = False # break if jidx == iidx: continue if self.dominates(obj_df.loc[iidx, :], obj_df.loc[jidx, :]): drop.append(jidx) elif self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]): keep = False break for d in drop: PP.remove(d) if keep: PP.add(iidx) # iidx += 1 is_nondom = pd.Series(data=False, index=obs_df.index, dtype=bool) is_nondom.loc[PP] = True return is_nondom def is_nondominated_kung(self, obs_df): """identify which candidate solutions are pareto non-dominated using Kungs algorithm Parameters ---------- obs_df : pandas.DataFrame dataframe with columns of observation names and rows of realizations Returns ------- is_dominated : pandas.Series series with index of obs_df and bool series """ obj_df = obs_df.loc[:, self.obs_obj_names] obj_names = 
self.obs_obj_names ascending = False if self.obs_dict[obj_names[0]] == "min": ascending = True obj_df.sort_values(by=obj_names[0], ascending=ascending, inplace=True) P = list(obj_df.index) def front(p): if len(p) == 1: return p p = list( obj_df.loc[p, :].sort_values(by=obj_names[0], ascending=ascending).index ) half = int(len(p) / 2) T = front(p[:half]) B = front(p[half:]) M = [] i = 0 while i < len(B): j = 0 while j < len(T): # if dominates(T[j],B[i]): if self.dominates(obj_df.loc[T[j], :], obj_df.loc[B[i], :]): break j += 1 if j == len(T): M.append(B[i]) i += 1 T.extend(M) return T PP = front(P) is_nondom = pd.Series(data=False, index=obs_df.index, dtype=bool) is_nondom.loc[PP] = True return is_nondom def crowd_distance(self, obs_df): """determine the crowding distance for each candidate solution Parameters ---------- obs_df : pandas.DataFrame dataframe with columns of observation names and rows of realizations Returns ------- crowd_distance : pandas.Series series with index of obs_df and values of crowd distance """ # initialize the distance container crowd_distance = pd.Series(data=0.0, index=obs_df.index) for name, direction in self.obs_dict.items(): # make a copy - wasteful, but easier obj_df = obs_df.loc[:, name].copy() # sort so that largest values are first obj_df.sort_values(ascending=False, inplace=True) # set the ends so they are always retained crowd_distance.loc[obj_df.index[0]] += self.max_distance crowd_distance.loc[obj_df.index[-1]] += self.max_distance # process the vector i = 1 for idx in obj_df.index[1:-1]: crowd_distance.loc[idx] += obj_df.iloc[i - 1] - obj_df.iloc[i + 1] i += 1 return crowd_distance def get_risk_shifted_value(self, risk, series): n = series.name if n in self.obs_dict.keys(): d = self.obs_dict[n] t = "obj" elif n in self.pst.less_than_obs_constraints: d = "min" t = "lt_obs" elif n in self.pst.greater_than_obs_constraints: d = "max" t = "gt_obs" else: self.logger.lraise( "series is not an obs obj func or obs inequality 
contraint:{0}".format( n ) ) ascending = False if d == "min": ascending = True s = series.shape[0] shift = int(s * risk) if shift >= s: shift = s - 1 cdf = series.sort_values(ascending=ascending).apply(np.cumsum) val = float(cdf.iloc[shift]) # print(cdf) # print(shift,cdf.iloc[shift]) # self.logger.statement("risk-shift for {0}->type:{1}dir:{2},shift:{3},val:{4}".format(n,t,d,shift,val)) return val def reduce_stack_with_risk_shift(self, oe, num_reals, risk): stochastic_cols = list(self.obs_dict.keys()) stochastic_cols.extend(self.pst.less_than_obs_constraints) stochastic_cols.extend(self.pst.greater_than_obs_constraints) stochastic_cols = set(stochastic_cols) vvals = [] for i in range(0, oe.shape[0], num_reals): oes = oe.iloc[i : i + num_reals] vals = [] for col in oes.columns: if col in stochastic_cols: val = self.get_risk_shifted_value(risk=risk, series=oes.loc[:, col]) # otherwise, just fill with the mean value else: val = oes.loc[:, col].mean() vals.append(val) vvals.append(vals) df = pd.DataFrame(data=vvals, columns=oe.columns) return df class EvolAlg(EnsembleMethod): def __init__( self, pst, parcov=None, obscov=None, num_workers=0, use_approx_prior=True, submit_file=None, verbose=False, port=4004, worker_dir="template", ): super(EvolAlg, self).__init__( pst=pst, parcov=parcov, obscov=obscov, num_workers=num_workers, submit_file=submit_file, verbose=verbose, port=port, worker_dir=worker_dir, ) def initialize( self, obj_func_dict, num_par_reals=100, num_dv_reals=100, dv_ensemble=None, par_ensemble=None, risk=0.5, dv_names=None, par_names=None, ): # todo : setup a run results store for all candidate solutions? or maybe # just nondom, feasible solutions? 
# todo : check that the dv ensemble index is not duplicated self.dv_ensemble_archive = None self.obs_ensemble_archive = None if risk != 0.5: if risk > 1.0 or risk < 0.0: self.logger.lraise("risk not in 0.0:1.0 range") self.risk = risk self.obj_func = ParetoObjFunc(self.pst, obj_func_dict, self.logger) self.par_ensemble = None # all adjustable pars are dec vars if dv_ensemble is None and par_ensemble is None: self.num_dv_reals = num_dv_reals if dv_names is not None: aset = set(self.pst.adj_par_names) dvset = set(dv_names) diff = dvset - aset if len(diff) > 0: self.logger.lraise( "the following dv_names were not " + "found in the adjustable parameters: {0}".format( ",".join(diff) ) ) how = {p: "uniform" for p in dv_names} else: if risk != 0.5: self.logger.lraise( "risk != 0.5 but all adjustable pars are dec vars" ) how = {p: "uniform" for p in self.pst.adj_par_names} self.dv_ensemble = pyemu.ParameterEnsemble.from_mixed_draws( self.pst, how_dict=how, num_reals=num_dv_reals, cov=self.parcov ) if risk != 0.5: aset = set(self.pst.adj_par_names) dvset = set(self.dv_ensemble.columns) diff = aset - dvset if len(diff) > 0: self.logger.lraise( "risk!=0.5 but all adjustable parameters are dec vars" ) self.par_ensemble = pyemu.ParameterEnsemble.from_gaussian_draw( self.pst, num_reals=num_par_reals, cov=self.parcov ) else: self.par_ensemble = None # both par ensemble and dv ensemble were passed elif par_ensemble is not None and dv_ensemble is not None: self.num_dv_reals = dv_ensemble.shape[0] aset = set(self.pst.adj_par_names) ppset = set(self.pst.par_names) dvset = set(dv_ensemble.columns) pset = set(par_ensemble.columns) diff = ppset - aset if len(diff) > 0: self.logger.lraise( "the following par_ensemble names were not " + "found in the pst par names: {0}".format(",".join(diff)) ) if len(diff) > 0: self.logger.lraise( "the following dv_ensemble names were not " + "found in the adjustable parameters: {0}".format(",".join(diff)) ) self.par_ensemble = par_ensemble 
self.dv_ensemble = dv_ensemble # dv_ensemble supplied, but not pars, so check if any adjustable pars are not # in dv_ensemble, and if so, draw reals for them elif dv_ensemble is not None and par_ensemble is None: self.num_dv_reals = dv_ensemble.shape[0] aset = set(self.pst.adj_par_names) dvset = set(dv_ensemble.columns) diff = dvset - aset if len(diff) > 0: self.logger.lraise( "the following dv_ensemble names were not " + "found in the adjustable parameters: {0}".format(",".join(diff)) ) self.dv_ensemble = dv_ensemble if risk != 0.5: if par_names is not None: pset = set(par_names) diff = pset - aset if len(diff) > 0: self.logger.lraise( "the following par_names were not " + "found in the adjustable parameters: {0}".format( ",".join(diff) ) ) how = {p: "gaussian" for p in par_names} else: adj_pars = aset - dvset if len(adj_pars) == 0: self.logger.lraise( "risk!=0.5 but all adjustable pars are dec vars" ) how = {p: "gaussian" for p in adj_pars} self.par_ensemble = pyemu.ParameterEnsemble.from_mixed_draws( self.pst, how_dict=how, num_reals=num_par_reals, cov=self.parcov ) else: diff = aset - dvset if len(diff) > 0: self.logger.warn( "adj pars {0} missing from dv_ensemble".format(",".join(diff)) ) df = pd.DataFrame(self.pst.parameter_data.loc[:, "parval1"]).T self.par_ensemble = pyemu.ParameterEnsemble.from_dataframe( df=df, pst=self.pst ) print(self.par_ensemble.shape) # par ensemble supplied but not dv_ensmeble, so check for any adjustable pars # that are not in par_ensemble and draw reals. Must be at least one... 
elif par_ensemble is not None and dv_ensemble is None: self.num_dv_reals = num_dv_reals aset = set(self.pst.par_names) pset = set(par_ensemble.columns) diff = aset - pset if len(diff) > 0: self.logger.lraise( "the following par_ensemble names were not " + "found in the pst par names: {0}".format(",".join(diff)) ) self.par_ensemble = par_ensemble if dv_names is None: self.logger.lraise( "dv_names must be passed if dv_ensemble is None and par_ensmeble is not None" ) dvset = set(dv_names) diff = dvset - aset if len(diff) > 0: self.logger.lraise( "the following dv_names were not " + "found in the adjustable parameters: {0}".format(",".join(diff)) ) how = {p: "uniform" for p in dv_names} self.dv_ensemble = pyemu.ParameterEnsemble.from_mixed_draws( self.pst, how_dict=how, num_reals=num_dv_reals, cov=self.parcov, partial=True, ) self.last_stack = None self.logger.log( "evaluate initial dv ensemble of size {0}".format(self.dv_ensemble.shape[0]) ) self.obs_ensemble = self._calc_obs(self.dv_ensemble) self.logger.log( "evaluate initial dv ensemble of size {0}".format(self.dv_ensemble.shape[0]) ) isfeas = self.obj_func.is_feasible(self.obs_ensemble, risk=self.risk) isnondom = self.obj_func.is_nondominated(self.obs_ensemble) vc = isfeas.value_counts() if True not in vc: self.logger.lraise("no feasible solutions in initial population") self.logger.statement( "{0} feasible individuals in initial population".format(vc[True]) ) self.dv_ensemble = self.dv_ensemble.loc[isfeas, :] self.obs_ensemble = self.obs_ensemble.loc[isfeas, :] vc = isnondom.value_counts() if True in vc: self.logger.statement( "{0} nondominated solutions in initial population".format(vc[True]) ) else: self.logger.statement("no nondominated solutions in initial population") self.dv_ensemble = self.dv_ensemble.loc[isfeas, :] self.obs_ensemble = self.obs_ensemble.loc[isfeas, :] self.pst.add_transform_columns() self._initialized = True @staticmethod def _drop_failed(failed_runs, dv_ensemble, obs_ensemble): if 
failed_runs is None: return dv_ensemble.loc[failed_runs, :] = np.NaN dv_ensemble = dv_ensemble.dropna(axis=1) obs_ensemble.loc[failed_runs, :] = np.NaN obs_ensemble = obs_ensemble.dropna(axis=1) self.logger.statement( "dropped {0} failed runs, {1} remaining".format( len(failed_runs), dv_ensemble.shape[0] ) ) def _archive(self, dv_ensemble, obs_ensemble): self.logger.log("archiving {0} solutions".format(dv_ensemble.shape[0])) if dv_ensemble.shape[0] != obs_ensemble.shape[0]: self.logger.lraise( "EvolAlg._archive() error: shape mismatch: {0} : {1}".format( dv_ensemble.shape[0], obs_ensemble.shape[0] ) ) obs_ensemble = obs_ensemble.copy() dv_ensemble = dv_ensemble.copy() isfeas = self.obj_func.is_feasible(obs_ensemble) isnondom = self.obj_func.is_nondominated(obs_ensemble) cd = self.obj_func.crowd_distance(obs_ensemble) obs_ensemble.loc[isfeas.index, "feasible"] = isfeas obs_ensemble.loc[isnondom.index, "nondominated"] = isnondom dv_ensemble.loc[isfeas.index, "feasible"] = isfeas dv_ensemble.loc[isnondom.index, "nondominated"] = isnondom obs_ensemble.loc[:, "iteration"] = self.iter_num dv_ensemble.loc[:, "iteration"] = self.iter_num obs_ensemble.loc[cd.index, "crowd_distance"] = cd dv_ensemble.loc[cd.index, "crowd_distance"] = cd if self.obs_ensemble_archive is None: self.obs_ensemble_archive = obs_ensemble._df.loc[:, :] self.dv_ensemble_archive = dv_ensemble._df.loc[:, :] else: self.obs_ensemble_archive = self.obs_ensemble_archive.append( obs_ensemble._df.loc[:, :] ) self.dv_ensemble_archive = self.dv_ensemble_archive.append( dv_ensemble.loc[:, :] ) def _calc_obs(self, dv_ensemble): if self.par_ensemble is None: failed_runs, oe = super(EvolAlg, self)._calc_obs(dv_ensemble) else: # make a copy of the org par ensemble but as a df instance df_base = self.par_ensemble._df.loc[:, :] # stack up the par ensembles for each solution dfs = [] for i in range(dv_ensemble.shape[0]): solution = dv_ensemble.iloc[i, :] df = df_base.copy() df.loc[:, solution.index] = solution.values 
dfs.append(df) df = pd.concat(dfs) # reset with a range index org_index = df.index.copy() df.index = np.arange(df.shape[0]) failed_runs, oe = super(EvolAlg, self)._calc_obs(df) if oe.shape[0] != dv_ensemble.shape[0] * self.par_ensemble.shape[0]: self.logger.lraise("wrong number of runs back from stack eval") EvolAlg._drop_failed(failed_runs, dv_ensemble, oe) self.last_stack = oe.copy() self.logger.log("reducing initial stack evaluation") df = self.obj_func.reduce_stack_with_risk_shift( oe, self.par_ensemble.shape[0], self.risk ) self.logger.log("reducing initial stack evaluation") # big assumption the run results are in the same order df.index = dv_ensemble.index oe = pyemu.ObservationEnsemble.from_dataframe(df=df, pst=self.pst) self._archive(dv_ensemble, oe) return oe def update(self, *args, **kwargs): self.logger.lraise("EvolAlg.update() must be implemented by derived types") class EliteDiffEvol(EvolAlg): def __init__( self, pst, parcov=None, obscov=None, num_workers=0, use_approx_prior=True, submit_file=None, verbose=False, port=4004, worker_dir="template", ): super(EliteDiffEvol, self).__init__( pst=pst, parcov=parcov, obscov=obscov, num_workers=num_workers, submit_file=submit_file, verbose=verbose, port=port, worker_dir=worker_dir, ) def update(self, mut_base=0.8, cross_over_base=0.7, num_dv_reals=None): if not self._initialized: self.logger.lraise("not initialized") if num_dv_reals is None: num_dv_reals = self.num_dv_reals if self.dv_ensemble.shape[0] < 4: self.logger.lraise("not enough individuals in population to continue") # function to get unique index names self._child_count = 0 def next_name(): while True: sol_name = "c_i{0}_{1}".format(self.iter_num, self._child_count) if sol_name not in self.dv_ensemble.index.values: break self._child_count += 1 return sol_name # generate self.num_dv_reals offspring using diff evol rules dv_offspring = [] child2parent = {} offspring_idx = [] tol = 1.0 num_dv = self.dv_ensemble.shape[1] dv_names = 
self.dv_ensemble.columns dv_log = self.pst.parameter_data.loc[dv_names, "partrans"] == "log" lb = self.pst.parameter_data.loc[dv_names, "parlbnd"].copy() ub = self.pst.parameter_data.loc[dv_names, "parubnd"].copy() lb.loc[dv_log] = lb.loc[dv_log].apply(np.log10) ub.loc[dv_log] = ub.loc[dv_log].apply(np.log10) dv_ensemble_trans = self.dv_ensemble.copy() for idx in dv_ensemble_trans.index: dv_ensemble_trans.loc[idx, dv_log] = dv_ensemble_trans.loc[ idx, dv_log ].apply(lambda x: np.log10(x)) for i in range(num_dv_reals): # every parent gets an offspring if i < self.dv_ensemble.shape[0]: parent_idx = i mut = mut_base cross_over = cross_over_base else: # otherwise, some parents get more than one offspring # could do something better here - like pick a good parent # make a wild child parent_idx = np.random.randint(0, dv_ensemble_trans.shape[0]) mut = 0.9 cross_over = 0.9 parent = dv_ensemble_trans.iloc[parent_idx, :] # select the three other members in the population abc_idxs = np.random.choice(dv_ensemble_trans.index, 3, replace=False) abc = dv_ensemble_trans.loc[abc_idxs, :].copy() mutant = abc.iloc[0] + (mut * (abc.iloc[1] - abc.iloc[2])) # select cross over genes (dec var values) cross_points = np.random.rand(num_dv) < cross_over if not np.any(cross_points): cross_points[np.random.randint(0, num_dv)] = True # create an offspring offspring = parent._df.copy() offspring.loc[cross_points] = mutant.loc[cross_points] # enforce bounds out = offspring > ub offspring.loc[out] = ub.loc[out] out = offspring < lb offspring.loc[out] = lb.loc[out] # back transform offspring.loc[dv_log] = 10.0 ** offspring.loc[dv_log] offspring = offspring.loc[self.dv_ensemble.columns] sol_name = "c_{0}".format(i) dv_offspring.append(offspring) offspring_idx.append(sol_name) child2parent[sol_name] = dv_ensemble_trans.index[parent_idx] dv_offspring = pd.DataFrame( dv_offspring, columns=self.dv_ensemble.columns, index=offspring_idx ) # run the model with offspring candidates self.logger.log( 
"running {0} canditiate solutions for iteration {1}".format( dv_offspring.shape[0], self.iter_num ) ) obs_offspring = self._calc_obs(dv_offspring) # evaluate offspring fitness WRT feasibility and nondomination (elitist) - # if offspring dominates parent, replace in # self.dv_ensemble and self.obs_ensemble. if not, drop candidate. # If tied, keep both isfeas = self.obj_func.is_feasible(obs_offspring) isnondom = self.obj_func.is_nondominated(obs_offspring) for child_idx in obs_offspring.index: if not isfeas[child_idx]: self.logger.statement("child {0} is not feasible".format(child_idx)) continue child_sol = obs_offspring.loc[child_idx, :] parent_idx = child2parent[child_idx] if parent_idx is None: # the parent was already removed by another child, so if this child is # feasible and nondominated, keep it if isnondom(child_idx): self.logger.statement( "orphaned child {0} retained".format(child_idx) ) sol_name = next_name() self.dv_ensemble.loc[sol_name, child_sol.index] = child_sol self.obs_ensemble.loc[ sol_name, obs_offspring.columns ] = obs_offspring.loc[child_idx, :] else: parent_sol = self.obs_ensemble.loc[parent_idx, :] if self.obj_func.dominates( parent_sol.loc[self.obj_func.obs_obj_names], child_sol.loc[self.obj_func.obs_obj_names], ): self.logger.statement( "child {0} dominated by parent {1}".format( child_idx, parent_idx ) ) # your dead to me! pass elif self.obj_func.dominates( child_sol.loc[self.obj_func.obs_obj_names], parent_sol.loc[self.obj_func.obs_obj_names], ): # hey dad, what do you think about your son now! 
self.logger.statement( "child {0} dominates parent {1}".format(child_idx, parent_idx) ) self.dv_ensemble.loc[ parent_idx, dv_offspring.columns ] = dv_offspring.loc[child_idx, :] self.obs_ensemble._df.loc[ parent_idx, obs_offspring.columns ] = obs_offspring._df.loc[child_idx, :] child2parent[idx] = None else: self.logger.statement( "child {0} and parent {1} kept".format(child_idx, parent_idx) ) sol_name = next_name() self.dv_ensemble.loc[ sol_name, dv_offspring.columns ] = dv_offspring.loc[child_idx, :] self.obs_ensemble._df.loc[ sol_name, obs_offspring.columns ] = obs_offspring._df.loc[child_idx, :] # if there are too many individuals in self.dv_ensemble, # first drop dominated,then reduce by using crowding distance. # self.logger.statement("number of solutions:{0}".format(self.dv_ensemble.shape[0])) isnondom = self.obj_func.is_nondominated(self.obs_ensemble) dom_idx = isnondom.loc[isnondom == False].index nondom_idx = isnondom.loc[isnondom == True].index self.logger.statement( "number of dominated solutions:{0}".format(dom_idx.shape[0]) ) # self.logger.statement("nondominated solutions: {0}".format(','.join(nondom_idx))) self.logger.statement("dominated solutions: {0}".format(",".join(str(dom_idx)))) ndrop = self.dv_ensemble.shape[0] - num_dv_reals if ndrop > 0: isnondom = self.obj_func.is_nondominated(self.obs_ensemble) vc = isnondom.value_counts() # if there a dominated solutions, drop those first, using # crowding distance as the order if False in vc.index: # get dfs for the dominated solutions dv_dom = self.dv_ensemble.loc[dom_idx, :].copy() obs_dom = self.obs_ensemble.loc[dom_idx, :].copy() self.dv_ensemble.drop(dom_idx, inplace=True) self.obs_ensemble.drop(dom_idx, inplace=True) self.logger.statement( "dropping {0} dominated individuals based on crowd distance".format( min(ndrop, dv_dom.shape[0]) ) ) self._drop_by_crowd(dv_dom, obs_dom, min(ndrop, dv_dom.shape[0])) # add any remaining dominated solutions back self.dv_ensemble = 
self.dv_ensemble.append(dv_dom._df) self.obs_ensemble = self.obs_ensemble.append(obs_dom._df) # drop remaining nondom solutions as needed if self.dv_ensemble.shape[0] > num_dv_reals: self._drop_by_crowd( self.dv_ensemble, self.obs_ensemble, self.dv_ensemble.shape[0] - num_dv_reals, ) self.iter_report() self.iter_num += 1 return def iter_report(self): oe = self.obs_ensemble.copy() dv = self.dv_ensemble.copy() isfeas = self.obj_func.is_feasible(oe) isnondom = self.obj_func.is_nondominated(oe) cd = self.obj_func.crowd_distance(oe) for df in [oe, dv]: df.loc[isfeas.index, "feasible"] = isfeas df.loc[isnondom.index, "nondominated"] = isnondom df.loc[cd.index, "crowd_distance"] = cd dv.to_csv("dv_ensemble.{0}.csv".format(self.iter_num + 1)) oe.to_csv("obs_ensemble.{0}.csv".format(self.iter_num + 1)) self.logger.statement("*** iteration {0} report".format(self.iter_num + 1)) self.logger.statement("{0} current solutions".format(dv.shape[0])) self.logger.statement("{0} infeasible".format(isfeas[isfeas == False].shape[0])) self.logger.statement( "{0} nondomiated".format(isnondom[isnondom == True].shape[0]) ) def _drop_by_crowd(self, dv_ensemble, obs_ensemble, ndrop, min_dist=0.1): if ndrop > dv_ensemble.shape[0]: self.logger.lraise( "EliteDiffEvol.drop_by_crowd() error: ndrop" + "{0} > dv_ensemble.shape[0] {1}".format(ndrop, dv_ensemble.shape[0]) ) self.logger.statement( "dropping {0} of {1} individuals based on crowd distance".format( ndrop, dv_ensemble.shape[0] ) ) # if min_dist is not None: # while True: # cd = self.obj_func.crowd_distance(obs_ensemble) # if cd.min() >= min_dist or ndrop == 0: # break # cd.sort_values(inplace=True, ascending=False) # # drop_idx = cd.index[-1] # self.logger.statement("dropping solution {0} - less then 'min_dist' apart{1}".\ # format(drop_idx,cd.loc[drop_idx])) # # dv_ensemble.drop(drop_idx,inplace=True) # obs_ensemble.drop(drop_idx,inplace=True) # ndrop -= 1% for idrop in range(ndrop): cd = self.obj_func.crowd_distance(obs_ensemble) 
cd.sort_values(inplace=True, ascending=False) # drop the first element in cd from both dv_ensemble and obs_ensemble drop_idx = cd.index[-1] self.logger.statement( "solution {0} removed based on crowding distance {1}".format( drop_idx, cd[drop_idx] ) ) dv_ensemble.drop(drop_idx, inplace=True) obs_ensemble.drop(drop_idx, inplace=True)
jtwhite79/pyemu
pyemu/prototypes/moouu.py
Python
bsd-3-clause
36,825
[ "Gaussian" ]
6d7b2e6122b9f2d8dbb0ff5324922b486944300e3f6e1a30ad9676d040d4c76e
#!/usr/bin/env python ######################################################################## # File : dirac-wms-job-get-jdl # Author : Stuart Paterson ######################################################################## """ Retrieve the current JDL of a DIRAC job Usage: dirac-wms-job-get-jdl [options] ... JobID ... Arguments: JobID: DIRAC Job ID Example: $ dirac-wms-job-get-jdl 1 {'Arguments': '-ltrA', 'CPUTime': '86400', 'DIRACSetup': 'EELA-Production', 'Executable': '/bin/ls', 'JobID': '1', 'JobName': 'DIRAC_vhamar_602138', 'JobRequirements': '[OwnerDN = /O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar; OwnerGroup = eela_user; Setup = EELA-Production; UserPriority = 1; CPUTime = 0 ]', 'OutputSandbox': ['std.out', 'std.err'], 'Owner': 'vhamar', 'OwnerDN': '/O=GRID-FR/C=FR/O=CNRS/OU=CPPM/CN=Vanessa Hamar', 'OwnerGroup': 'eela_user', 'OwnerName': 'vhamar', 'Priority': '1'} """ import DIRAC from DIRAC.Core.Base.Script import Script @Script() def main(): original = False Script.registerSwitch("O", "Original", "Gets the original JDL") # Registering arguments will automatically add their description to the help menu Script.registerArgument(["JobID: DIRAC Job ID"]) sws, args = Script.parseCommandLine(ignoreErrors=True) for switch in sws: if switch[0] == "Original" or switch[0] == "O": original = True from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments dirac = Dirac() exitCode = 0 errorList = [] for job in parseArguments(args): result = dirac.getJobJDL(job, original=original, printOutput=True) if not result["OK"]: errorList.append((job, result["Message"])) exitCode = 2 for error in errorList: print("ERROR %s: %s" % error) DIRAC.exit(exitCode) if __name__ == "__main__": main()
DIRACGrid/DIRAC
src/DIRAC/Interfaces/scripts/dirac_wms_job_get_jdl.py
Python
gpl-3.0
2,000
[ "DIRAC" ]
02445484f1586e8df2dc9c2738ef11497bef560c6968734166ebf4227c8e12cf
import sys import os import re import datetime import shutil import gzip import glob from collections import namedtuple import tempfile import traceback import Bio.SeqUtils import simtk.openmm import yaml import msmbuilder import warnings import ensembler import ensembler.version import Bio import Bio.SeqIO import Bio.pairwise2 import Bio.SubsMat.MatrixInfo from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord import mdtraj import msmbuilder from ensembler.core import get_targets_and_templates, get_templates_full_seq from ensembler.core import mpistate, logger try: import modeller import modeller.automodel except: pass if sys.version_info < (3,0): try: import subprocess32 as subprocess loopmodel_subprocess_kwargs = {'timeout': 10800} # 3 hour timeout - used for loopmodel call except ImportError: warnings.warn('subprocess32 module not available. Falling back to subprocess module, without timeout functionality.') import subprocess loopmodel_subprocess_kwargs = {} else: import subprocess loopmodel_subprocess_kwargs = {'timeout': 10800} # 3 hour timeout - used for loopmodel call TargetSetupData = namedtuple( 'TargetSetupData', ['target_starttime', 'models_target_dir'] ) class LoopmodelOutput: def __init__(self, output_text=None, loopmodel_exception=None, exception=None, trbk=None, successful=False, no_missing_residues=False): self.output_text = output_text self.exception = exception self.loopmodel_exception = loopmodel_exception self.traceback = trbk self.successful = successful self.no_missing_residues = no_missing_residues @ensembler.utils.notify_when_done def model_template_loops(process_only_these_templates=None, overwrite_structures=False, loglevel=None): """ Use Rosetta loopmodel to model missing loops in template structures. 
Completed templates are stored in templates/structures-modeled-loops :param process_only_these_templates: list of str :param loglevel: str :return: """ ensembler.utils.set_loglevel(loglevel) targets, templates_resolved_seq = ensembler.core.get_targets_and_templates() templates_full_seq = get_templates_full_seq() missing_residues_list = pdbfix_templates(templates_full_seq, process_only_these_templates=process_only_these_templates, overwrite_structures=overwrite_structures) loopmodel_templates(templates_resolved_seq, missing_residues_list, process_only_these_templates=process_only_these_templates, overwrite_structures=overwrite_structures) def pdbfix_templates(templates_full_seq, process_only_these_templates=None, overwrite_structures=False): """ Parameters ---------- templates_full_seq: list of BioPython SeqRecord full UniProt sequence for span of the template (including unresolved residues) process_only_these_templates: list of str overwrite_structures: bool Returns ------- missing_residues_list: list of list of OpenMM Residue """ missing_residues_sublist = [] ntemplates = len(templates_full_seq) for template_index in range(mpistate.rank, ntemplates, mpistate.size): template_full_seq = templates_full_seq[template_index] if process_only_these_templates and template_full_seq.id not in process_only_these_templates: missing_residues_sublist.append(None) continue missing_residues_sublist.append(pdbfix_template(template_full_seq, overwrite_structures=overwrite_structures)) missing_residues_gathered = mpistate.comm.gather(missing_residues_sublist, root=0) missing_residues_list = [] if mpistate.rank == 0: missing_residues_list = [None] * ntemplates for template_index in range(ntemplates): missing_residues_list[template_index] = missing_residues_gathered[template_index % mpistate.size][template_index // mpistate.size] missing_residues_list = mpistate.comm.bcast(missing_residues_list, root=0) return missing_residues_list def pdbfix_template(template_full_seq, 
overwrite_structures=False): """ Parameters ---------- template_full_seq: BioPython SeqRecord full UniProt sequence for span of the template (including unresolved residues) overwrite_structures: bool Returns ------- fixer.missingResidues """ try: template_pdbfixed_filepath = os.path.join( ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template_full_seq.id + '-pdbfixed.pdb' ) seq_pdbfixed_filepath = os.path.join( ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template_full_seq.id + '-pdbfixed.fasta' ) import pdbfixer import simtk.openmm.app template_filepath = os.path.join( ensembler.core.default_project_dirnames.templates_structures_resolved, template_full_seq.id + '.pdb' ) fixer = pdbfixer.PDBFixer(filename=template_filepath) chainid = next(fixer.topology.chains()).id sequence = [ Bio.SeqUtils.seq3(r).upper() for r in template_full_seq.seq ] seq_obj = pdbfixer.pdbfixer.Sequence(chainid, sequence) fixer.sequences.append(seq_obj) fixer.findMissingResidues() remove_missing_residues_at_termini(fixer, len_full_seq=len(template_full_seq.seq)) if not overwrite_structures and os.path.exists(template_pdbfixed_filepath): return fixer.missingResidues fixer.findMissingAtoms() (newTopology, newPositions, newAtoms, existingAtomMap) = fixer._addAtomsToTopology(True, True) fixer.topology = newTopology fixer.positions = newPositions with open(template_pdbfixed_filepath, 'w') as template_pdbfixed_file: simtk.openmm.app.PDBFile.writeFile( fixer.topology, fixer.positions, file=template_pdbfixed_file ) # Write sequence to file seq_pdbfixed = ''.join([Bio.SeqUtils.seq1(r.name) for r in fixer.topology.residues()]) seq_record_pdbfixed = SeqRecord(Seq(seq_pdbfixed), id=template_full_seq.id, description=template_full_seq.id) Bio.SeqIO.write([seq_record_pdbfixed], seq_pdbfixed_filepath, 'fasta') return fixer.missingResidues except (KeyboardInterrupt, ImportError): raise except Exception as e: trbk = traceback.format_exc() 
log_filepath = os.path.abspath(os.path.join( ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template_full_seq.id + '-pdbfixer-log.yaml' )) logfile = ensembler.core.LogFile(log_filepath) logfile.log({ 'templateid': str(template_full_seq.id), 'exception': e, 'traceback': ensembler.core.literal_str(trbk), 'mpi_rank': mpistate.rank, }) logger.error( 'MPI rank %d pdbfixer error for template %s - see logfile' % (mpistate.rank, template_full_seq.id) ) logger.debug(e) logger.debug(trbk) def remove_missing_residues_at_termini(fixer, len_full_seq): # remove C-terminal missing residues if len(fixer.missingResidues) == 0: return None sorted_missing_residues_keys = sorted(fixer.missingResidues, key=lambda x: x[1]) last_missing_residues_key = sorted_missing_residues_keys[-1] last_missing_residues_start_index = last_missing_residues_key[1] last_missing_residues = fixer.missingResidues[last_missing_residues_key] nmissing_residues_up_to_last = sum([len(fixer.missingResidues[key]) for key in sorted_missing_residues_keys[:-1]]) if last_missing_residues_start_index + nmissing_residues_up_to_last + len(last_missing_residues) == len_full_seq: fixer.missingResidues.pop(last_missing_residues_key) # remove N-terminal missing residues fixer.missingResidues.pop((0, 0), None) def loopmodel_templates(templates, missing_residues, process_only_these_templates=None, overwrite_structures=False): """ Parameters ---------- templates: list of BioPython SeqRecord only the id is used missing_residues: list of list of OpenMM Residue process_only_these_templates: bool overwrite_structures: bool """ for template_index in range(mpistate.rank, len(templates), mpistate.size): template = templates[template_index] if process_only_these_templates and template.id not in process_only_these_templates: continue if mpistate.size > 1: logger.info('MPI rank %d modeling missing loops for template %s' % (mpistate.rank, template.id)) else: logger.info('Modeling missing loops for template %s' 
% template.id) loopmodel_template(template, missing_residues[template_index], overwrite_structures=overwrite_structures) def loopmodel_template(template, missing_residues, overwrite_structures=False): template_filepath = os.path.abspath(os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '-pdbfixed.pdb')) output_pdb_filepath = os.path.abspath(os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '.pdb')) loop_filepath = os.path.abspath(os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '.loop')) output_score_filepath = os.path.abspath(os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '-loopmodel-score.sc')) log_filepath = os.path.abspath(os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '-loopmodel-log.yaml')) if not overwrite_structures: if os.path.exists(log_filepath): return logfile = ensembler.core.LogFile(log_filepath) write_loop_file(template, missing_residues) starttime = datetime.datetime.utcnow() if len(missing_residues) == 0: loopmodel_output = LoopmodelOutput(successful=True, no_missing_residues=True) else: loopmodel_output = run_loopmodel(template_filepath, loop_filepath, output_pdb_filepath, output_score_filepath) if not loopmodel_output.successful: logger.error('MPI rank %d Loopmodel error for template %s - see logfile' % (mpistate.rank, template.id)) timedelta = datetime.datetime.utcnow() - starttime logfile.log({ 'templateid': str(template.id), 'no_missing_residues': loopmodel_output.no_missing_residues, 'loopmodel_output': loopmodel_output.output_text, 'mpi_rank': mpistate.rank, 'successful': loopmodel_output.successful, 'exception': loopmodel_output.exception, 'loopmodel_exception': loopmodel_output.loopmodel_exception, 'traceback': loopmodel_output.traceback, 'timing': 
ensembler.core.strf_timedelta(timedelta), }) def write_loop_file(template, missing_residues): loop_file_text = '' loop_residues_added = 0 loop_residues_data = [(key[1], len(residues)) for key, residues in missing_residues.iteritems()] loop_residues_data = sorted(loop_residues_data, key=lambda x: x[0]) for loop_residue_data in loop_residues_data: residue_number, nresidues = loop_residue_data loop_begin = residue_number + loop_residues_added # 1-based, one residue before the loop loop_end = residue_number + nresidues + loop_residues_added + 1 # 1-based, one residue after the loop loop_residues_added += nresidues # Note that missing residues at termini (which cannot be modeled by Rosetta loopmodel) have already been removed from the PDBFixer.missingResidues dictionary loop_file_text += 'LOOP {0} {1} - - 1\n'.format(loop_begin, loop_end) loop_filepath = os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '.loop') with open(loop_filepath, 'w') as loop_file: loop_file.write(loop_file_text) def run_loopmodel(input_template_pdb_filepath, loop_filepath, output_pdb_filepath, output_score_filepath, loopmodel_executable_filepath=None, nmodels_to_build=1): if loopmodel_executable_filepath is None: loopmodel_executable_filepath = ensembler.core.find_loopmodel_executable() temp_dir = tempfile.mkdtemp() temp_template_filepath = os.path.join(temp_dir, 'template.pdb') temp_loop_filepath = os.path.join(temp_dir, 'template.loop') temp_output_model_filepath = os.path.join(temp_dir, 'template_0001.pdb') temp_output_score_filepath = os.path.join(temp_dir, 'score.sc') minirosetta_database_path = os.environ.get('MINIROSETTA_DATABASE') shutil.copy(input_template_pdb_filepath, temp_template_filepath) shutil.copy(loop_filepath, temp_loop_filepath) try: output_text = subprocess.check_output( [ loopmodel_executable_filepath, '-database', minirosetta_database_path, '-in::file::s', temp_template_filepath, '-loops:loop_file', temp_loop_filepath, 
'-out:path:all', temp_dir, '-loops:remodel', 'perturb_kic', '-loops:refine', 'refine_kic', '-ex1', '-ex2', '-nstruct', '%d' % nmodels_to_build, '-loops:max_kic_build_attempts', '100', '-in:file:fullatom', '-overwrite', ], stderr=subprocess.STDOUT, **loopmodel_subprocess_kwargs ) if os.path.exists(temp_output_model_filepath): shutil.copy(temp_output_model_filepath, output_pdb_filepath) shutil.copy(temp_output_score_filepath, output_score_filepath) shutil.rmtree(temp_dir) return LoopmodelOutput(output_text=output_text, successful=True) else: shutil.rmtree(temp_dir) return LoopmodelOutput(output_text=output_text, successful=False) except KeyboardInterrupt: shutil.rmtree(temp_dir) raise except subprocess.CalledProcessError as e: shutil.rmtree(temp_dir) return LoopmodelOutput(loopmodel_exception=e.output, trbk=traceback.format_exc(), successful=False) except subprocess.TimeoutExpired as e: shutil.rmtree(temp_dir) return LoopmodelOutput(output_text=e.output, exception=e, trbk=traceback.format_exc(), successful=False) except Exception as e: shutil.rmtree(temp_dir) return LoopmodelOutput(output_text=output_text, exception=e, trbk=traceback.format_exc(), successful=False) def check_loopmodel_complete_and_successful(template): output_pdb_filepath = os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '.pdb') log_filepath = os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template.id + '-loopmodel-log.yaml') if os.path.exists(log_filepath) and os.path.exists(output_pdb_filepath): with open(log_filepath) as log_file: log_data = yaml.load(log_file, Loader=ensembler.core.YamlLoader) if log_data.get('successful') == True: return True else: return False @ensembler.utils.notify_when_done def align_targets_and_templates(process_only_these_targets=None, process_only_these_templates=None, substitution_matrix='gonnet', gap_open=-10, gap_extend=-0.5, loglevel=None ): """ Conducts pairwise 
alignments of target sequences against template sequences. Stores Modeller-compatible 'alignment.pir' files in each model directory, and also outputs a table of model IDs, sorted by sequence identity. Parameters ---------- process_only_these_targets: process_only_these_templates: substitution_matrix: str Specify an amino acid substitution matrix available from Bio.SubsMat.MatrixInfo """ ensembler.utils.set_loglevel(loglevel) targets, templates_resolved_seq = ensembler.core.get_targets_and_templates() ntemplates = len(templates_resolved_seq) nselected_templates = len(process_only_these_templates) if process_only_these_templates else ntemplates for target in targets: if process_only_these_targets and target.id not in process_only_these_targets: continue if mpistate.rank == 0: logger.info('Working on target %s...' % target.id) models_target_dir = os.path.join(ensembler.core.default_project_dirnames.models, target.id) ensembler.utils.create_dir(models_target_dir) seq_identity_data_sublist = [] for template_index in range(mpistate.rank, ntemplates, mpistate.size): template_id = templates_resolved_seq[template_index].id if os.path.exists(os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template_id + '.pdb')): remodeled_seq_filepath = os.path.join(ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template_id + '-pdbfixed.fasta') template = list(Bio.SeqIO.parse(remodeled_seq_filepath, 'fasta'))[0] else: template = templates_resolved_seq[template_index] if process_only_these_templates and template_id not in process_only_these_templates: continue model_dir = os.path.abspath(os.path.join(ensembler.core.default_project_dirnames.models, target.id, template_id)) ensembler.utils.create_dir(model_dir) aln = align_target_template( target, template, substitution_matrix=substitution_matrix, gap_open=gap_open, gap_extend=gap_extend ) aln_filepath = os.path.join(model_dir, 'alignment.pir') 
write_modeller_pir_aln_file(aln, target, template, pir_aln_filepath=aln_filepath) seq_identity_data_sublist.append({ 'templateid': template_id, 'seq_identity': calculate_seq_identity(aln), }) seq_identity_data_gathered = mpistate.comm.gather(seq_identity_data_sublist, root=0) seq_identity_data = [] if mpistate.rank == 0: seq_identity_data = [None] * nselected_templates for i in range(nselected_templates): seq_identity_data[i] = seq_identity_data_gathered[i % mpistate.size][i // mpistate.size] seq_identity_data = mpistate.comm.bcast(seq_identity_data, root=0) seq_identity_data = sorted(seq_identity_data, key=lambda x: x['seq_identity'], reverse=True) write_sorted_seq_identities(target, seq_identity_data) def align_target_template(target, template, substitution_matrix='gonnet', gap_open=-10, gap_extend=-0.5 ): """ Parameters ---------- target: BioPython SeqRecord template: BioPython SeqRecord substitution_matrix: str Specify an amino acid substitution matrix available from Bio.SubsMat.MatrixInfo gap_open: float or int gap_extend: float or int Returns ------- alignment: list """ matrix = getattr(Bio.SubsMat.MatrixInfo, substitution_matrix) aln = Bio.pairwise2.align.globalds(str(target.seq), str(template.seq), matrix, gap_open, gap_extend) return aln def calculate_seq_identity(aln): len_shorter_seq = min([len(aln[0][0].replace('-', '')), len(aln[0][1].replace('-', ''))]) seq_id = 0 for r in range(len(aln[0][0])): if aln[0][0][r] == aln[0][1][r]: seq_id += 1 seq_id = 100 * float(seq_id) / float(len_shorter_seq) return seq_id def write_sorted_seq_identities(target, seq_identity_data): seq_identity_file_str = '' for seq_identity_dict in seq_identity_data: seq_identity_file_str += '%-30s %.1f\n' % (seq_identity_dict['templateid'], seq_identity_dict['seq_identity']) seq_identity_filepath = os.path.join(ensembler.core.default_project_dirnames.models, target.id, 'sequence-identities.txt') with open(seq_identity_filepath, 'w') as seq_identity_file: 
seq_identity_file.write(seq_identity_file_str) @ensembler.utils.notify_when_done def build_models(process_only_these_targets=None, process_only_these_templates=None, model_seqid_cutoff=None, write_modeller_restraints_file=False, loglevel=None): """Uses the build_model method to build homology models for a given set of targets and templates. MPI-enabled. """ # Note that this code uses an os.chdir call to switch into a temp directory before running Modeller. # This is because Modeller writes various output files in the current directory, and there is NO WAY # to define where these files are written, other than to chdir beforehand. If running this routine # in parallel, it is likely that occasional exceptions will occur, due to concurrent processes # making os.chdir calls. ensembler.utils.set_loglevel(loglevel) targets, templates_resolved_seq = get_targets_and_templates() if process_only_these_templates: selected_template_indices = [i for i, seq in enumerate(templates_resolved_seq) if seq.id in process_only_these_templates] else: selected_template_indices = range(len(templates_resolved_seq)) for target in targets: if process_only_these_targets and target.id not in process_only_these_targets: continue target_setup_data = build_models_target_setup(target) if model_seqid_cutoff: process_only_these_templates = ensembler.core.select_templates_by_seqid_cutoff(target.id, seqid_cutoff=model_seqid_cutoff) selected_template_indices = [i for i, seq in enumerate(templates_resolved_seq) if seq.id in process_only_these_templates] ntemplates_selected = len(selected_template_indices) for template_index in range(mpistate.rank, ntemplates_selected, mpistate.size): template_resolved_seq = templates_resolved_seq[selected_template_indices[template_index]] if process_only_these_templates and template_resolved_seq.id not in process_only_these_templates: continue build_model(target, template_resolved_seq, target_setup_data, write_modeller_restraints_file=write_modeller_restraints_file, 
loglevel=loglevel) write_build_models_metadata(target, target_setup_data, process_only_these_targets, process_only_these_templates, model_seqid_cutoff, write_modeller_restraints_file) def build_model(target, template_resolved_seq, target_setup_data, write_modeller_restraints_file=False, loglevel=None): """Uses Modeller to build a homology model for a given target and template. Will not run Modeller if the output files already exist. Parameters ---------- target : BioPython SeqRecord template_resolved_seq : BioPython SeqRecord Must be a corresponding .pdb template file with the same ID in the templates/structures directory. template_resolved_seq : BioPython SeqRecord Must be a corresponding .pdb template file with the same ID in the templates/structures directory. target_setup_data : TargetSetupData obj write_modeller_restraints_file : bool Write file containing restraints used by Modeller - note that this file can be relatively large, e.g. ~300KB per model for a protein kinase domain target. 
loglevel : bool """ ensembler.utils.set_loglevel(loglevel) template_structure_dir = os.path.abspath( ensembler.core.default_project_dirnames.templates_structures_modeled_loops ) if os.path.exists(os.path.join(template_structure_dir, template_resolved_seq.id + '.pdb')): remodeled_seq_filepath = os.path.join( ensembler.core.default_project_dirnames.templates_structures_modeled_loops, template_resolved_seq.id + '-pdbfixed.fasta' ) template = list(Bio.SeqIO.parse(remodeled_seq_filepath, 'fasta'))[0] else: template = template_resolved_seq template_structure_dir = os.path.abspath( ensembler.core.default_project_dirnames.templates_structures_resolved ) model_dir = os.path.abspath(os.path.join(target_setup_data.models_target_dir, template.id)) if not os.path.exists(model_dir): ensembler.utils.create_dir(model_dir) model_pdbfilepath = os.path.abspath(os.path.join(model_dir, 'model.pdb.gz')) modeling_log_filepath = os.path.abspath(os.path.join(model_dir, 'modeling-log.yaml')) check_model_pdbfilepath_ends_in_pdbgz(model_pdbfilepath) model_pdbfilepath_uncompressed = model_pdbfilepath[:-3] if check_all_model_files_present(model_dir): logger.debug( "Output files already exist for target '%s' // template '%s'; files were not overwritten." 
% (target.id, template.id) ) return logger.info( '-------------------------------------------------------------------------\n' 'Modelling "%s" => "%s"\n' '-------------------------------------------------------------------------' % (target.id, template.id) ) # aln = align_target_template(target, template) aln_filepath = os.path.abspath(os.path.join(model_dir, 'alignment.pir')) # write_modeller_pir_aln_file(aln, target, template, pir_aln_filepath=aln_filepath) log_file = init_build_model_logfile(modeling_log_filepath) with ensembler.utils.enter_temp_dir(): try: start = datetime.datetime.utcnow() shutil.copy(aln_filepath, 'alignment.pir') run_modeller(target, template, model_dir, model_pdbfilepath, model_pdbfilepath_uncompressed, template_structure_dir, write_modeller_restraints_file=write_modeller_restraints_file) if os.path.getsize(model_pdbfilepath) < 1: raise Exception('Output PDB file is empty.') end_successful_build_model_logfile(log_file, start) except Exception as e: end_exception_build_model_logfile(e, log_file) def get_modeller_version(): """Hacky attempt to get Modeller version by regex searching the installation directory or README file. 
""" modeller_version = get_modeller_version_from_install_path(modeller) if modeller_version is not None: return modeller_version modeller_version = get_modeller_version_from_readme(modeller) if modeller_version is not None: return modeller_version def get_modeller_version_from_install_path(modeller_module): regex = re.compile('/modeller-[0-9.]{2,6}/') match = re.search(regex, modeller_module.__file__) if match is not None: version = match.group()[10:-1] return version def get_modeller_version_from_readme(modeller_module): readme_file_path = os.path.join(os.path.dirname(modeller_module.__file__), '..', '..', 'README') if os.path.exists(readme_file_path): with open(readme_file_path) as readme_file: # try first 10 lines # example desired line: # MODELLER 9.11, 2012/08/29, r8834 for i in range(10): line = readme_file.readline().strip() regex = re.compile('MODELLER [0-9.]{2,6}') match = re.search(regex, line) if match is not None: version = match.group()[9:] return version def build_models_target_setup(target): target_setup_data = None if mpistate.rank == 0: models_target_dir = os.path.join(ensembler.core.default_project_dirnames.models, target.id) target_starttime = datetime.datetime.utcnow() logger.info( '=========================================================================\n' 'Working on target "%s"\n' '=========================================================================' % target.id ) target_setup_data = TargetSetupData( target_starttime=target_starttime, models_target_dir=models_target_dir ) target_setup_data = mpistate.comm.bcast(target_setup_data, root=0) return target_setup_data def gen_build_models_metadata(target, target_setup_data, process_only_these_targets, process_only_these_templates, model_seqid_cutoff, write_modeller_restraints_file): """ Generate build_models metadata for a given target. 
:param target: BioPython SeqRecord :param target_setup_data: :return: metadata: dict """ datestamp = ensembler.core.get_utcnow_formatted() nsuccessful_models = subprocess.check_output(['find', target_setup_data.models_target_dir, '-name', 'model.pdb.gz']).count('\n') target_timedelta = datetime.datetime.utcnow() - target_setup_data.target_starttime modeller_version = get_modeller_version() metadata = { 'target_id': target.id, 'write_modeller_restraints_file': write_modeller_restraints_file, 'model_seqid_cutoff': model_seqid_cutoff, 'datestamp': datestamp, 'timing': ensembler.core.strf_timedelta(target_timedelta), 'nsuccessful_models': nsuccessful_models, 'process_only_these_targets': process_only_these_targets, 'process_only_these_templates': process_only_these_templates, 'python_version': sys.version.split('|')[0].strip(), 'python_full_version': ensembler.core.literal_str(sys.version), 'ensembler_version': ensembler.version.short_version, 'ensembler_commit': ensembler.version.git_revision, 'modeller_version': modeller_version if modeller_version is not None else '', 'biopython_version': Bio.__version__ } return metadata def check_model_pdbfilepath_ends_in_pdbgz(model_pdbfilepath): if model_pdbfilepath[-7:] != '.pdb.gz': raise Exception('model_pdbfilepath (%s) must end in .pdb.gz' % model_pdbfilepath) def check_all_model_files_present(model_dir): seqid_filepath = os.path.abspath(os.path.join(model_dir, 'sequence-identity.txt')) model_pdbfilepath = os.path.abspath(os.path.join(model_dir, 'model.pdb.gz')) aln_filepath = os.path.abspath(os.path.join(model_dir, 'alignment.pir')) files_to_check = [model_pdbfilepath, seqid_filepath, aln_filepath] files_present = [os.path.exists(filename) for filename in files_to_check] return all(files_present) def init_build_model_logfile(modeling_log_filepath): log_data = { 'mpi_rank': mpistate.rank, 'complete': False, } log_filepath = modeling_log_filepath log_file = ensembler.core.LogFile(log_filepath) 
log_file.log(new_log_data=log_data) return log_file def write_modeller_pir_aln_file(aln, target, template, pir_aln_filepath='alignment.pir'): contents = "Target-template alignment\n" contents += ">P1;%s\n" % target.id contents += "sequence:%s:FIRST:@:LAST :@:::-1.00:-1.00\n" % target.id contents += aln[0][0] + '*\n' contents += ">P1;%s\n" % template.id contents += "structureX:%s:FIRST:@:LAST : :undefined:undefined:-1.00:-1.00\n" % template.id contents += aln[0][1] + '*\n' with open(pir_aln_filepath, 'w') as outfile: outfile.write(contents) def run_modeller(target, template, model_dir, model_pdbfilepath, model_pdbfilepath_uncompressed, template_structure_dir, aln_filepath='alignment.pir', write_modeller_restraints_file=False): modeller.log.none() env = modeller.environ() env.io.atom_files_directory = [template_structure_dir] a = modeller.automodel.allhmodel( env, alnfile=aln_filepath, knowns=template.id, sequence=target.id ) a.make() # do homology modeling save_modeller_output_files(target, model_dir, a, env, model_pdbfilepath, model_pdbfilepath_uncompressed, write_modeller_restraints_file=write_modeller_restraints_file) def save_modeller_output_files(target, model_dir, a, env, model_pdbfilepath, model_pdbfilepath_uncompressed, write_modeller_restraints_file=False): # save PDB file # Note that the uncompressed pdb file needs to be kept until after the clustering step has completed tmp_model_pdbfilepath = a.outputs[0]['name'] target_model = modeller.model(env, file=tmp_model_pdbfilepath) target_model.write(file=model_pdbfilepath_uncompressed) with open(model_pdbfilepath_uncompressed) as model_pdbfile: with gzip.open(model_pdbfilepath, 'w') as model_pdbfilegz: model_pdbfilegz.write(model_pdbfile.read()) # Write sequence identity. seqid_filepath = os.path.abspath(os.path.join(model_dir, 'sequence-identity.txt')) with open(seqid_filepath, 'w') as seqid_file: seqid_file.write('%.1f\n' % target_model.seq_id) # Copy restraints. 
if write_modeller_restraints_file: restraint_filepath = os.path.abspath(os.path.join(model_dir, 'restraints.rsr.gz')) with open('%s.rsr' % target.id, 'r') as rsrfile: with gzip.open(restraint_filepath, 'wb') as rsrgzfile: rsrgzfile.write(rsrfile.read()) def end_successful_build_model_logfile(log_file, start): end = datetime.datetime.utcnow() timing = ensembler.core.strf_timedelta(end - start) log_data = { 'complete': True, 'timing': timing, } log_file.log(new_log_data=log_data) def end_exception_build_model_logfile(e, log_file): trbk = traceback.format_exc() log_data = { 'exception': e, 'traceback': ensembler.core.literal_str(trbk), } log_file.log(new_log_data=log_data) @ensembler.utils.mpirank0only_and_end_with_barrier def write_build_models_metadata(target, target_setup_data, process_only_these_targets, process_only_these_templates, model_seqid_cutoff, write_modeller_restraints_file): project_metadata = ensembler.core.ProjectMetadata(project_stage='build_models', target_id=target.id) metadata = gen_build_models_metadata(target, target_setup_data, process_only_these_targets, process_only_these_templates, model_seqid_cutoff, write_modeller_restraints_file) project_metadata.add_data(metadata) project_metadata.write() @ensembler.utils.mpirank0only_and_end_with_barrier @ensembler.utils.notify_when_done def cluster_models(process_only_these_targets=None, cutoff=0.06, loglevel=None): """Cluster models based on RMSD, and filter out non-unique models as determined by a given cutoff. Parameters ---------- cutoff : float Minimum distance cutoff for RMSD clustering (nm) Runs serially. 
""" # TODO refactor ensembler.utils.set_loglevel(loglevel) targets, templates_resolved_seq = get_targets_and_templates() templates = templates_resolved_seq for target in targets: if process_only_these_targets and (target.id not in process_only_these_targets): continue models_target_dir = os.path.join(ensembler.core.default_project_dirnames.models, target.id) if not os.path.exists(models_target_dir): continue # ============================= # Construct a mdtraj trajectory containing all models # ============================= starttime = datetime.datetime.utcnow() logger.debug('Building a list of valid models...') model_pdbfilenames_compressed = { template.id: os.path.join(models_target_dir, template.id, 'model.pdb.gz') for template in templates } model_pdbfilenames_uncompressed = { template.id: os.path.join(models_target_dir, template.id, 'model.pdb') for template in templates } valid_templateids = [ templateid for templateid in model_pdbfilenames_compressed if os.path.exists(model_pdbfilenames_compressed[templateid]) ] # Write uncompressed model.pdb files from model.pdb.gz if necessary for templateid in valid_templateids: if not os.path.exists(model_pdbfilenames_uncompressed[templateid]) or os.path.getsize(model_pdbfilenames_uncompressed[templateid]) == 0: with gzip.open(model_pdbfilenames_compressed[templateid]) as model_pdbfile_compressed: with open(model_pdbfilenames_uncompressed[templateid], 'w') as model_pdbfile: model_pdbfile.write(model_pdbfile_compressed.read()) logger.info('Constructing a trajectory containing all valid models...') if len(valid_templateids) == 0: logger.info('No models found for target {0}.'.format(target.id)) continue valid_model_pdbfilenames_uncompressed = [ model_pdbfilenames_uncompressed[templateid] for templateid in valid_templateids ] traj = mdtraj.load(valid_model_pdbfilenames_uncompressed) # ============================= # Clustering # ============================= logger.info('Conducting RMSD-based clustering...') # Remove any 
existing unique_by_clustering files for f in glob.glob(models_target_dir+'/*_PK_*/unique_by_clustering'): os.unlink(f) CAatoms = [a.index for a in traj.topology.atoms if a.name == 'CA'] unique_templateids = models_regular_spatial_clustering( valid_templateids, traj, atom_indices=CAatoms, cutoff=cutoff ) write_unique_by_clustering_files(unique_templateids, models_target_dir) with open(os.path.join(models_target_dir, 'unique-models.txt'), 'w') as uniques_file: for u in unique_templateids: uniques_file.write(u+'\n') logger.info( '%d unique models (from original set of %d) using cutoff of %.3f nm' % (len(unique_templateids), len(valid_templateids), cutoff) ) for template in templates: model_dir = os.path.join(models_target_dir, template.id) model_pdbfilename = os.path.join(model_dir, 'model.pdb') if os.path.exists(model_pdbfilename): os.remove(model_pdbfilename) # ======== # Metadata # ======== project_metadata = ensembler.core.ProjectMetadata( project_stage='cluster_models', target_id=target.id ) datestamp = ensembler.core.get_utcnow_formatted() timedelta = datetime.datetime.utcnow() - starttime metadata = { 'target_id': target.id, 'datestamp': datestamp, 'nunique_models': len(unique_templateids), 'python_version': sys.version.split('|')[0].strip(), 'python_full_version': ensembler.core.literal_str(sys.version), 'ensembler_version': ensembler.version.short_version, 'ensembler_commit': ensembler.version.git_revision, 'biopython_version': Bio.__version__, 'mdtraj_version': mdtraj.version.short_version, 'mdtraj_commit': mdtraj.version.git_revision, 'timing': ensembler.core.strf_timedelta(timedelta), } project_metadata.add_data(metadata) project_metadata.write() def models_regular_spatial_clustering(templateids, traj, atom_indices=None, cutoff=0.06): """ Use MSMBuilder to perform RMSD-based regular spatial clustering on a set of models. 
Parameters ---------- templateids: list of str traj: mdtraj.Trajectory atom_indices: np.array cutoff: float Minimum distance cutoff for RMSD clustering (nm) """ if atom_indices: reduced_traj = traj.atom_slice(atom_indices) else: reduced_traj = traj import msmbuilder.cluster cluster = msmbuilder.cluster.RegularSpatial(cutoff, metric='rmsd') cluster_labels = cluster.fit_predict([reduced_traj])[0] unique_templateids = list(set([templateids[t] for t in cluster_labels])) return unique_templateids def write_unique_by_clustering_files(unique_templateids, models_target_dir): for templateid in unique_templateids: unique_filename = os.path.join(models_target_dir, templateid, 'unique_by_clustering') with open(unique_filename, 'w') as unique_file: pass def _deprecated_models_regular_spatial_clustering(templateids, traj, atom_indices=None, cutoff=0.06): """ Superseded by models_regular_spatial_clustering """ mdtraj_rmsd_args = {} if atom_indices: mdtraj_rmsd_args['atom_indices'] = atom_indices unique_templateids = [] min_rmsd = [] # Iterate through models for (t, templateid) in enumerate(templateids): # Add the first templateid to the list of uniques if t==0: unique_templateids.append(templateid) continue # Calculate rmsds of models up to t against the model t. rmsds = mdtraj.rmsd(traj[0:t], traj[t], parallel=False, **mdtraj_rmsd_args) min_rmsd.append(min(rmsds)) # If any rmsd is less than cutoff, discard; otherwise add to list of uniques if min_rmsd[-1] < cutoff: continue else: unique_templateids.append(templateid) return unique_templateids
choderalab/ensembler
ensembler/modeling.py
Python
gpl-2.0
42,150
[ "Biopython", "MDTraj", "OpenMM" ]
7017d4b6d1b48774f9440a54f80d7e957158cfdb417ae4d302f3f90bb24cefcb
# $Id$ # # Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """unit testing code for the EState indices validation values are from the paper (JCICS _31_ 76-81 (1991)) """ import unittest from io import StringIO import numpy as np from rdkit import Chem from rdkit.Chem import EState class TestCase(unittest.TestCase): def _compareEstates(self, val1, val2, msg, tol=1e-2): maxV = max(abs(val1 - val2)) self.assertLess(maxV, tol, msg) def _validate(self, vals, places=2, tol=1e-2, debug=False): for smi, ans in vals: ans = np.array(ans) mol = Chem.MolFromSmiles(smi) inds = EState.EStateIndices(mol) if debug: # pragma: nocover print(inds) self._compareEstates(ans, inds, 'bad EStates for smiles: {0}'.format(smi), tol=tol) self.assertLess(abs(EState.MaxEStateIndex(mol) - max(ans)), tol) self.assertLess(abs(EState.MinEStateIndex(mol) - min(ans)), tol) self.assertLess(abs(EState.MaxAbsEStateIndex(mol) - max(abs(ans))), tol) self.assertLess(abs(EState.MinAbsEStateIndex(mol) - min(abs(ans))), tol) def test_simpleMolecules(self): data = [ ('CCCC', [2.18, 1.32, 1.32, 2.18]), ('CCCCC', [2.21, 1.34, 1.39, 1.34, 2.21]), ('CCCCCCC', [2.24, 1.36, 1.42, 1.44, 1.42, 1.36, 2.24]), ('CCCCCCCCCC', [2.27, 1.37, 1.44, 1.46, 1.47, 1.47, 1.46, 1.44, 1.37, 2.27]), ] self._validate(data) def test_isomers(self): data = [ ('CCCCCC', [2.23, 1.36, 1.41, 1.41, 1.36, 2.23]), ('CCC(C)CC', [2.23, 1.33, 0.94, 2.28, 1.33, 2.23]), ('CC(C)CCC', [2.25, 0.90, 2.25, 1.38, 1.33, 2.22]), ('CC(C)(C)CC', [2.24, 0.54, 2.24, 2.24, 1.27, 2.20]), ] self._validate(data) def test_heteroatoms1(self): data = [ ('CCCCOCCCC', [2.18, 1.24, 1.21, 0.95, 5.31, 0.95, 1.21, 1.24, 2.18]), ('CCC(C)OC(C)CC', [2.15, 1.12, 0.43, 2.12, 5.54, 0.43, 2.12, 1.12, 2.15]), ('CC(C)(C)OC(C)(C)C', [2.07, -0.02, 2.07, 
2.07, 5.63, -0.02, 2.07, 2.07, 2.07]), ('CC(C)CC', [2.22, 0.88, 2.22, 1.31, 2.20]), ('CC(C)CN', [2.10, 0.66, 2.10, 0.81, 5.17]), ('CC(C)CO', [1.97, 0.44, 1.97, 0.31, 8.14]), ('CC(C)CF', [1.85, 0.22, 1.85, -0.19, 11.11]), ('CC(C)CCl', [2.09, 0.65, 2.09, 0.78, 5.34]), ('CC(C)CBr', [2.17, 0.80, 2.17, 1.11, 3.31]), ('CC(C)CI', [2.21, 0.87, 2.21, 1.28, 2.38]), ] self._validate(data, debug=False) def test_heteroatoms2(self): data = [ ('CC(N)C(=O)O', [1.42, -0.73, 4.84, -0.96, 9.57, 7.86]), ('CCOCC', [1.99, 0.84, 4.83, 0.84, 1.99]), # NOTE: this doesn't match the values in the paper ('CCSCC', [2.17, 1.26, 1.96, 1.26, 2.17]), ('CC(=O)OC', [1.36, -0.24, 9.59, 4.11, 1.35]), ('CC(=S)OC', [1.73, 0.59, 4.47, 4.48, 1.56]), ] self._validate(data, debug=False) def test_aromatics(self): # aromatics with heteroatoms data = [ ('Fc1ccc(C)cc1', [12.09, -0.17, 1.45, 1.75, 1.09, 1.93, 1.75, 1.45]), ('Clc1ccc(C)cc1', [5.61, 0.80, 1.89, 1.99, 1.24, 2.04, 1.99, 1.89]), ('Brc1ccc(C)cc1', [3.35, 1.14, 2.04, 2.07, 1.30, 2.08, 2.07, 2.04]), ('Ic1ccc(C)cc1', [2.30, 1.30, 2.10, 2.11, 1.32, 2.09, 2.11, 2.10]), ] self._validate(data, debug=False) def test_GetPrincipleQuantumNumber(self): for principalQN, (nmin, nmax) in enumerate( [(1, 2), (3, 10), (11, 18), (19, 36), (37, 54), (55, 86), (87, 120)], 1): for n in range(nmin, nmax + 1): self.assertEqual(EState.GetPrincipleQuantumNumber(n), principalQN) def test_cacheEstate(self): mol = Chem.MolFromSmiles('CCCC') expected = [2.18, 1.32, 1.32, 2.18] # The mol object has no information about E-states self.assertFalse(hasattr(mol, '_eStateIndices')) inds = EState.EStateIndices(mol) self._compareEstates(inds, expected, 'cacheTest') # We now have E-states stored with the molecule self.assertTrue(hasattr(mol, '_eStateIndices')) # Let's make sure that we skip the calculation next time if force is False mol._eStateIndices = 'cached' self.assertTrue(hasattr(mol, '_eStateIndices')) inds = EState.EStateIndices(mol, force=False) self.assertEqual(inds, 'cached') # 
But with force (default) we calculate again inds = EState.EStateIndices(mol) self._compareEstates(inds, expected, 'cacheTest') self._compareEstates(mol._eStateIndices, expected, 'cacheTest') def test_exampleCode(self): # We make sure that the example code runs from rdkit.TestRunner import redirect_stdout f = StringIO() with redirect_stdout(f): EState.EState._exampleCode() s = f.getvalue() self.assertIn('CC(N)C(=O)O', s) if __name__ == '__main__': # pragma: nocover unittest.main()
greglandrum/rdkit
rdkit/Chem/EState/UnitTestEState.py
Python
bsd-3-clause
5,360
[ "RDKit" ]
2ad31ef1147bde8fa1b3d23b03b9eb043d39883aa7dfc9282158530a9e1aa9e0
#!/usr/bin/env python ################################################## ## DEPENDENCIES import sys import os import os.path try: import builtins as builtin except ImportError: import __builtin__ as builtin from os.path import getmtime, exists import time import types from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple from Cheetah.Template import Template from Cheetah.DummyTransaction import * from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList from Cheetah.CacheRegion import CacheRegion import Cheetah.Filters as Filters import Cheetah.ErrorCatchers as ErrorCatchers from urllib import quote from Plugins.Extensions.OpenWebif.local import tstrings ################################################## ## MODULE CONSTANTS VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time __CHEETAH_version__ = '2.4.4' __CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0) __CHEETAH_genTime__ = 1453357629.767626 __CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016' __CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/ajax/providers.tmpl' __CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016' __CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine' if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %s. 
Templates compiled before version %s must be recompiled.'%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES class providers(Template): ################################################## ## CHEETAH GENERATED METHODS def __init__(self, *args, **KWs): super(providers, self).__init__(*args, **KWs) if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body write(u'''<script> $(function() { InitAccordeon("#accordionP");}); </script> <div id="accordionP"> ''') for provider in VFFSL(SL,"providers",True): # generated from line 7, col 1 write(u'''\t<h1><a href="#" id="ajax/channels?id=''') _v = VFFSL(SL,"quote",False)(VFFSL(SL,"provider",True)[0]) # u'$quote($provider[0])' on line 8, col 39 if _v is not None: write(_filter(_v, rawExpr=u'$quote($provider[0])')) # from line 8, col 39. write(u'''&stype=''') _v = VFFSL(SL,"stype",True) # u'$stype' on line 8, col 66 if _v is not None: write(_filter(_v, rawExpr=u'$stype')) # from line 8, col 66. write(u'''">''') _v = VFFSL(SL,"provider",True)[1] # u'$provider[1]' on line 8, col 74 if _v is not None: write(_filter(_v, rawExpr=u'$provider[1]')) # from line 8, col 74. 
write(u'''</a></h1> \t<div> ''') _v = VFFSL(SL,"tstrings",True)['loading'] # u"$tstrings['loading']" on line 10, col 1 if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['loading']")) # from line 10, col 1. write(u''' ... \t</div> ''') write(u'''</div>''') ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES _CHEETAH__instanceInitialized = False _CHEETAH_version = __CHEETAH_version__ _CHEETAH_versionTuple = __CHEETAH_versionTuple__ _CHEETAH_genTime = __CHEETAH_genTime__ _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__ _CHEETAH_src = __CHEETAH_src__ _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__ _mainCheetahMethod_for_providers= 'respond' ## END CLASS DEFINITION if not hasattr(providers, '_initCheetahAttributes'): templateAPIClass = getattr(providers, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(providers) # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=providers()).run()
MOA-2011/e2openplugin-OpenWebif
plugin/controllers/views/ajax/providers.py
Python
gpl-2.0
5,501
[ "VisIt" ]
cdbbeea5d15ab9e6e4641da1ce4b74d81126f9f7e412d0de0b6e9586f1266d4b
import copy import os from collections import OrderedDict import numpy as np import robosuite.utils.transform_utils as T from robosuite.controllers import controller_factory, load_controller_config from robosuite.models.grippers import gripper_factory from robosuite.robots.manipulator import Manipulator from robosuite.utils.buffers import DeltaBuffer, RingBuffer from robosuite.utils.observables import Observable, sensor class Bimanual(Manipulator): """ Initializes a bimanual robot simulation object. Args: robot_type (str): Specification for specific robot arm to be instantiated within this env (e.g: "Panda") idn (int or str): Unique ID of this robot. Should be different from others controller_config (dict or list of dict --> dict of dict): If set, contains relevant controller parameters for creating custom controllers. Else, uses the default controller for this specific task. Should either be single dict if same controller is to be used for both robot arms or else it should be a list of length 2. :NOTE: In the latter case, assumes convention of [right, left] initial_qpos (sequence of float): If set, determines the initial joint positions of the robot to be instantiated for the task initialization_noise (dict): Dict containing the initialization noise parameters. The expected keys and corresponding value types are specified below: :`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial joint positions. Setting this value to "None" or 0.0 results in no noise being applied. If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied, If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range :`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform" :Note: Specifying None will automatically create the required dict with "magnitude" set to 0.0 mount_type (str): type of mount, used to instantiate mount models from mount factory. 
Default is "default", which is the default mount associated with this robot's corresponding model. None results in no mount, and any other (valid) model overrides the default mount. gripper_type (str or list of str --> dict): type of gripper, used to instantiate gripper models from gripper factory. Default is "default", which is the default gripper associated within the 'robot' specification. None removes the gripper, and any other (valid) model overrides the default gripper. Should either be single str if same gripper type is to be used for both arms or else it should be a list of length 2 :NOTE: In the latter case, assumes convention of [right, left] control_freq (float): how many control signals to receive in every second. This sets the amount of simulation time that passes between every action input. """ def __init__( self, robot_type: str, idn=0, controller_config=None, initial_qpos=None, initialization_noise=None, mount_type="default", gripper_type="default", control_freq=20, ): self.controller = self._input2dict(None) self.controller_config = self._input2dict(copy.deepcopy(controller_config)) self.gripper = self._input2dict(None) self.gripper_type = self._input2dict(gripper_type) self.has_gripper = self._input2dict([gripper_type is not None for _, gripper_type in self.gripper_type.items()]) self.gripper_joints = self._input2dict(None) # xml joint names for gripper self._ref_gripper_joint_pos_indexes = self._input2dict(None) # xml gripper joint position indexes in mjsim self._ref_gripper_joint_vel_indexes = self._input2dict(None) # xml gripper joint velocity indexes in mjsim self._ref_joint_gripper_actuator_indexes = self._input2dict( None ) # xml gripper (pos) actuator indexes for robot in mjsim self.eef_rot_offset = self._input2dict(None) # rotation offsets from final arm link to gripper (quat) self.eef_site_id = self._input2dict(None) # xml element id for eef in mjsim self.eef_cylinder_id = self._input2dict(None) # xml element id for eef cylinder in mjsim 
self.torques = None # Current torques being applied self.recent_ee_forcetorques = self._input2dict(None) # Current and last forces / torques sensed at eef self.recent_ee_pose = self._input2dict(None) # Current and last eef pose (pos + ori (quat)) self.recent_ee_vel = self._input2dict(None) # Current and last eef velocity self.recent_ee_vel_buffer = self._input2dict(None) # RingBuffer holding prior 10 values of velocity values self.recent_ee_acc = self._input2dict(None) # Current and last eef acceleration super().__init__( robot_type=robot_type, idn=idn, initial_qpos=initial_qpos, initialization_noise=initialization_noise, mount_type=mount_type, control_freq=control_freq, ) def _load_controller(self): """ Loads controller to be used for dynamic trajectories """ # Flag for loading urdf once (only applicable for IK controllers) urdf_loaded = False # Load controller configs for both left and right arm for arm in self.arms: # First, load the default controller if none is specified if not self.controller_config[arm]: # Need to update default for a single agent controller_path = os.path.join( os.path.dirname(__file__), "..", "controllers/config/{}.json".format(self.robot_model.default_controller_config[arm]), ) self.controller_config[arm] = load_controller_config(custom_fpath=controller_path) # Assert that the controller config is a dict file: # NOTE: "type" must be one of: {JOINT_POSITION, JOINT_TORQUE, JOINT_VELOCITY, # OSC_POSITION, OSC_POSE, IK_POSE} assert ( type(self.controller_config[arm]) == dict ), "Inputted controller config must be a dict! 
Instead, got type: {}".format( type(self.controller_config[arm]) ) # Add to the controller dict additional relevant params: # the robot name, mujoco sim, eef_name, actuator_range, joint_indexes, timestep (model) freq, # policy (control) freq, and ndim (# joints) self.controller_config[arm]["robot_name"] = self.name self.controller_config[arm]["sim"] = self.sim self.controller_config[arm]["eef_name"] = self.gripper[arm].important_sites["grip_site"] self.controller_config[arm]["eef_rot_offset"] = self.eef_rot_offset[arm] self.controller_config[arm]["ndim"] = self._joint_split_idx self.controller_config[arm]["policy_freq"] = self.control_freq (start, end) = (None, self._joint_split_idx) if arm == "right" else (self._joint_split_idx, None) self.controller_config[arm]["joint_indexes"] = { "joints": self.joint_indexes[start:end], "qpos": self._ref_joint_pos_indexes[start:end], "qvel": self._ref_joint_vel_indexes[start:end], } self.controller_config[arm]["actuator_range"] = ( self.torque_limits[0][start:end], self.torque_limits[1][start:end], ) # Only load urdf the first time this controller gets called self.controller_config[arm]["load_urdf"] = True if not urdf_loaded else False urdf_loaded = True # Instantiate the relevant controller self.controller[arm] = controller_factory(self.controller_config[arm]["type"], self.controller_config[arm]) def load_model(self): """ Loads robot and optionally add grippers. """ # First, run the superclass method to load the relevant model super().load_model() # Verify that the loaded model is of the correct type for this robot if self.robot_model.arm_type != "bimanual": raise TypeError( "Error loading robot model: Incompatible arm type specified for this robot. 
" "Requested model arm type: {}, robot arm type: {}".format(self.robot_model.arm_type, type(self)) ) # Now, load the gripper if necessary for arm in self.arms: if self.has_gripper[arm]: if self.gripper_type[arm] == "default": # Load the default gripper from the robot file self.gripper[arm] = gripper_factory( self.robot_model.default_gripper[arm], idn="_".join((str(self.idn), arm)) ) else: # Load user-specified gripper self.gripper[arm] = gripper_factory(self.gripper_type[arm], idn="_".join((str(self.idn), arm))) else: # Load null gripper self.gripper[arm] = gripper_factory(None, idn="_".join((str(self.idn), arm))) # Grab eef rotation offset self.eef_rot_offset[arm] = T.quat_multiply( self.robot_model.hand_rotation_offset[arm], self.gripper[arm].rotation_offset ) # Add this gripper to the robot model self.robot_model.add_gripper(self.gripper[arm], self.robot_model.eef_name[arm]) def reset(self, deterministic=False): """ Sets initial pose of arm and grippers. Overrides gripper joint configuration if we're using a deterministic reset (e.g.: hard reset from xml file) Args: deterministic (bool): If true, will not randomize initializations within the sim """ # First, run the superclass method to reset the position and controller super().reset(deterministic) if not deterministic: # Now, reset the gripper if necessary for arm in self.arms: if self.has_gripper[arm]: self.sim.data.qpos[self._ref_gripper_joint_pos_indexes[arm]] = self.gripper[arm].init_qpos # Setup arm-specific values for arm in self.arms: # Update base pos / ori references in controller (technically only needs to be called once) self.controller[arm].update_base_pose(self.base_pos, self.base_ori) # Setup buffers for eef values self.recent_ee_forcetorques[arm] = DeltaBuffer(dim=6) self.recent_ee_pose[arm] = DeltaBuffer(dim=7) self.recent_ee_vel[arm] = DeltaBuffer(dim=6) self.recent_ee_vel_buffer[arm] = RingBuffer(dim=6, length=10) self.recent_ee_acc[arm] = DeltaBuffer(dim=6) def setup_references(self): """ 
Sets up necessary reference for robots, grippers, and objects. Note that this should get called during every reset from the environment """ # First, run the superclass method to setup references for joint-related values / indexes super().setup_references() # Now, add references to gripper if necessary # indices for grippers in qpos, qvel for arm in self.arms: if self.has_gripper[arm]: self.gripper_joints[arm] = list(self.gripper[arm].joints) self._ref_gripper_joint_pos_indexes[arm] = [ self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints[arm] ] self._ref_gripper_joint_vel_indexes[arm] = [ self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints[arm] ] self._ref_joint_gripper_actuator_indexes[arm] = [ self.sim.model.actuator_name2id(actuator) for actuator in self.gripper[arm].actuators ] # IDs of sites for eef visualization self.eef_site_id[arm] = self.sim.model.site_name2id(self.gripper[arm].important_sites["grip_site"]) self.eef_cylinder_id[arm] = self.sim.model.site_name2id(self.gripper[arm].important_sites["grip_cylinder"]) def control(self, action, policy_step=False): """ Actuate the robot with the passed joint velocities and gripper control. Args: action (np.array): The control to apply to the robot. The first @self.robot_model.dof dimensions should be the desired normalized joint velocities and if the robot has a gripper, the next @self.gripper.dof dimensions should be actuation controls for the gripper. 
:NOTE: Assumes inputted actions are of form: [right_arm_control, right_gripper_control, left_arm_control, left_gripper_control] policy_step (bool): Whether a new policy step (action) is being taken Raises: AssertionError: [Invalid action dimension] """ # clip actions into valid range assert len(action) == self.action_dim, "environment got invalid action dimension -- expected {}, got {}".format( self.action_dim, len(action) ) self.torques = np.array([]) # Now execute actions for each arm for arm in self.arms: # Make sure to split action space correctly (start, end) = (None, self._action_split_idx) if arm == "right" else (self._action_split_idx, None) sub_action = action[start:end] gripper_action = None if self.has_gripper[arm]: # get all indexes past controller dimension indexes gripper_action = sub_action[self.controller[arm].control_dim :] sub_action = sub_action[: self.controller[arm].control_dim] # Update the controller goal if this is a new policy step if policy_step: self.controller[arm].set_goal(sub_action) # Now run the controller for a step and add it to the torques self.torques = np.concatenate((self.torques, self.controller[arm].run_controller())) # Get gripper action, if applicable if self.has_gripper[arm]: self.grip_action(gripper=self.gripper[arm], gripper_action=gripper_action) # Clip the torques low, high = self.torque_limits self.torques = np.clip(self.torques, low, high) # Apply joint torque control self.sim.data.ctrl[self._ref_joint_actuator_indexes] = self.torques # If this is a policy step, also update buffers holding recent values of interest if policy_step: # Update proprioceptive values self.recent_qpos.push(self._joint_positions) self.recent_actions.push(action) self.recent_torques.push(self.torques) for arm in self.arms: # Update arm-specific proprioceptive values self.recent_ee_forcetorques[arm].push(np.concatenate((self.ee_force[arm], self.ee_torque[arm]))) self.recent_ee_pose[arm].push( np.concatenate((self.controller[arm].ee_pos, 
T.mat2quat(self.controller[arm].ee_ori_mat))) ) self.recent_ee_vel[arm].push( np.concatenate((self.controller[arm].ee_pos_vel, self.controller[arm].ee_ori_vel)) ) # Estimation of eef acceleration (averaged derivative of recent velocities) self.recent_ee_vel_buffer[arm].push( np.concatenate((self.controller[arm].ee_pos_vel, self.controller[arm].ee_ori_vel)) ) diffs = np.vstack( [ self.recent_ee_acc[arm].current, self.control_freq * np.diff(self.recent_ee_vel_buffer[arm].buf, axis=0), ] ) ee_acc = np.array([np.convolve(col, np.ones(10) / 10.0, mode="valid")[0] for col in diffs.transpose()]) self.recent_ee_acc[arm].push(ee_acc) def _visualize_grippers(self, visible): """ Visualizes the gripper site(s) if applicable. Args: visible (bool): True if visualizing the gripper for this arm. """ for arm in self.arms: self.gripper[arm].set_sites_visibility(sim=self.sim, visible=visible) def setup_observables(self): """ Sets up observables to be used for this robot Returns: OrderedDict: Dictionary mapping observable names to its corresponding Observable object """ # Get general robot observables first observables = super().setup_observables() # Get prefix from robot model to avoid naming clashes for multiple robots and define observables modality pf = self.robot_model.naming_prefix modality = f"{pf}proprio" sensors = [] names = [] for arm in self.arms: # Add in eef info arm_sensors, arm_sensor_names = self._create_arm_sensors(arm=arm, modality=modality) sensors += arm_sensors names += arm_sensor_names # Create observables for this robot for name, s in zip(names, sensors): observables[name] = Observable( name=name, sensor=s, sampling_rate=self.control_freq, ) return observables def _create_arm_sensors(self, arm, modality): """ Helper function to create sensors for a given arm. This is abstracted in a separate function call so that we don't have local function naming collisions during the _setup_observables() call. 
Args: arm (str): Arm to create sensors for modality (str): Modality to assign to all sensors Returns: 2-tuple: sensors (list): Array of sensors for the given arm names (list): array of corresponding observable names """ pf = self.robot_model.naming_prefix # eef features @sensor(modality=modality) def eef_pos(obs_cache): return np.array(self.sim.data.site_xpos[self.eef_site_id[arm]]) @sensor(modality=modality) def eef_quat(obs_cache): return T.convert_quat(self.sim.data.get_body_xquat(self.robot_model.eef_name[arm]), to="xyzw") sensors = [eef_pos, eef_quat] names = [f"{pf}{arm}_eef_pos", f"{pf}{arm}_eef_quat"] # add in gripper sensors if this robot has a gripper if self.has_gripper[arm]: @sensor(modality=modality) def gripper_qpos(obs_cache): return np.array([self.sim.data.qpos[x] for x in self._ref_gripper_joint_pos_indexes[arm]]) @sensor(modality=modality) def gripper_qvel(obs_cache): return np.array([self.sim.data.qvel[x] for x in self._ref_gripper_joint_vel_indexes[arm]]) sensors += [gripper_qpos, gripper_qvel] names += [f"{pf}{arm}_gripper_qpos", f"{pf}{arm}_gripper_qvel"] return sensors, names def _input2dict(self, inp): """ Helper function that converts an input that is either a single value or a list into a dict with keys for each arm: "right", "left" Args: inp (str or list or None): Input value to be converted to dict :Note: If inp is a list, then assumes format is [right, left] Returns: dict: Inputs mapped for each robot arm """ # First, convert to list if necessary if type(inp) is not list: inp = [inp for _ in range(2)] # Now, convert list to dict and return return {key: value for key, value in zip(self.arms, inp)} @property def arms(self): """ Returns name of arms used as naming convention throughout this module Returns: 2-tuple: ('right', 'left') """ return "right", "left" @property def action_limits(self): """ Action lower/upper limits per dimension. 
Returns: 2-tuple: - (np.array) minimum (low) action values - (np.array) maximum (high) action values """ # Action limits based on controller limits low, high = [], [] for arm in self.arms: low_g, high_g = ( ([-1] * self.gripper[arm].dof, [1] * self.gripper[arm].dof) if self.has_gripper[arm] else ([], []) ) low_c, high_c = self.controller[arm].control_limits low, high = np.concatenate([low, low_c, low_g]), np.concatenate([high, high_c, high_g]) return low, high @property def ee_ft_integral(self): """ Returns: dict: each arm-specific entry specifies the integral over time of the applied ee force-torque for that arm """ vals = {} for arm in self.arms: vals[arm] = np.abs((1.0 / self.control_freq) * self.recent_ee_forcetorques[arm].average) return vals @property def ee_force(self): """ Returns: dict: each arm-specific entry specifies the force applied at the force sensor at the robot arm's eef """ vals = {} for arm in self.arms: vals[arm] = self.get_sensor_measurement(self.gripper[arm].important_sensors["force_ee"]) return vals @property def ee_torque(self): """ Returns: dict: each arm-specific entry specifies the torque applied at the torque sensor at the robot arm's eef """ vals = {} for arm in self.arms: vals[arm] = self.get_sensor_measurement(self.gripper[arm].important_sensors["torque_ee"]) return vals @property def _hand_pose(self): """ Returns: dict: each arm-specific entry specifies the eef pose in base frame of robot. """ vals = {} for arm in self.arms: vals[arm] = self.pose_in_base_from_name(self.robot_model.eef_name[arm]) return vals @property def _hand_quat(self): """ Returns: dict: each arm-specific entry specifies the eef quaternion in base frame of robot. 
""" vals = {} orns = self._hand_orn for arm in self.arms: vals[arm] = T.mat2quat(orns[arm]) return vals @property def _hand_total_velocity(self): """ Returns: dict: each arm-specific entry specifies the total eef velocity (linear + angular) in the base frame as a numpy array of shape (6,) """ vals = {} for arm in self.arms: # Determine correct start, end points based on arm (start, end) = (None, self._joint_split_idx) if arm == "right" else (self._joint_split_idx, None) # Use jacobian to translate joint velocities to end effector velocities. Jp = self.sim.data.get_body_jacp(self.robot_model.eef_name[arm]).reshape((3, -1)) Jp_joint = Jp[:, self._ref_joint_vel_indexes[start:end]] Jr = self.sim.data.get_body_jacr(self.robot_model.eef_name[arm]).reshape((3, -1)) Jr_joint = Jr[:, self._ref_joint_vel_indexes[start:end]] eef_lin_vel = Jp_joint.dot(self._joint_velocities) eef_rot_vel = Jr_joint.dot(self._joint_velocities) vals[arm] = np.concatenate([eef_lin_vel, eef_rot_vel]) return vals @property def _hand_pos(self): """ Returns: dict: each arm-specific entry specifies the position of eef in base frame of robot. """ vals = {} poses = self._hand_pose for arm in self.arms: eef_pose_in_base = poses[arm] vals[arm] = eef_pose_in_base[:3, 3] return vals @property def _hand_orn(self): """ Returns: dict: each arm-specific entry specifies the orientation of eef in base frame of robot as a rotation matrix. """ vals = {} poses = self._hand_pose for arm in self.arms: eef_pose_in_base = poses[arm] vals[arm] = eef_pose_in_base[:3, :3] return vals @property def _hand_vel(self): """ Returns: dict: each arm-specific entry specifies the velocity of eef in base frame of robot. """ vels = self._hand_total_velocity for arm in self.arms: vels[arm] = vels[arm][:3] return vels @property def _hand_ang_vel(self): """ Returns: dict: each arm-specific entry specifies the angular velocity of eef in base frame of robot. 
""" vels = self._hand_total_velocity for arm in self.arms: vels[arm] = vels[arm][3:] return vels @property def _action_split_idx(self): """ Grabs the index that correctly splits the right arm from the left arm actions :NOTE: Assumes inputted actions are of form: [right_arm_control, right_gripper_control, left_arm_control, left_gripper_control] Returns: int: Index splitting right from left arm actions """ return ( self.controller["right"].control_dim + self.gripper["right"].dof if self.has_gripper["right"] else self.controller["right"].control_dim ) @property def _joint_split_idx(self): """ Returns: int: the index that correctly splits the right arm from the left arm joints """ return int(len(self.robot_joints) / 2)
ARISE-Initiative/robosuite
robosuite/robots/bimanual.py
Python
mit
26,392
[ "Gaussian" ]
6b1fa7f9c30ebb60cfc58ff66a0e03feed57c1df0449ee9665aa6ba1c9f37b58
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2012 Dominik Kriegner <dominik.kriegner@gmail.com>

"""Example: build a Crystal from a CIF file and simulate its powder pattern.

Requires the file ``data/Calcite.cif`` relative to the working directory.
"""

import os

import xrayutilities as xu

# NOTE: the original example also imported numpy and a commented-out
# matplotlib; both were unused and have been removed.

# Create the material from the CIF description.
Calcite = xu.materials.Crystal.fromCIF(os.path.join("data", "Calcite.cif"))

# Experiment class with some unusual surface/in-plane directions; created
# here only to show how Crystal.Q feeds an HXRD geometry (not used below).
expcal = xu.HXRD(Calcite.Q(-2, 1, 9), Calcite.Q(1, -1, 4))

# Simulate the powder diffraction pattern and print the resulting peak table.
powder_cal = xu.simpack.PowderDiffraction(Calcite)
print(powder_cal)
dkriegner/xrayutilities
examples/xrayutilities_io_cif_parser.py
Python
gpl-2.0
1,102
[ "CRYSTAL" ]
f14044ac8551e8ae4cfad85800f75418954fdaf9e344a05a5a19d2c2dbc0baff
# $Id$ import operator from itcc.Torsionfit import cmpmol from itcc.Tinker import tinker from itcc.Tools import tools __revision__ = '$Rev$' optimize = tinker.batchoptimize energy = tinker.batchenergy class Meritresult(dict): pass def chkdeform(flist1, flist2): numdeformstru = 0 data = cmpmol.batchcmpmolfile(flist1, flist2) for i in range(len(data)): x = data[i] ac = x.maxanglechange() tc = x.maxtorsionchange() print flist1[i], ac[0], ac[3], tc[0], tc[3] if x.maxanglechange() >= 5.0 or x.maxtorsionchange() >= 10.0: numdeformstru += 1 return numdeformstru def merit(param): applyprm(param) iflist, oflist, refene = getfilelist() optene = tinker.batchoptimize(iflist, oflist) difene = [x-y for x, y in zip(refene, optene)] result = tools.stdev(difene) # print chkdeform(iflist, oflist) return result class Detailmerit(object): __slots__ = ['iflist', 'oflist', 'refene', 'optene', 'disres', 'result'] def detailmerit(self, iflist, oflist, refene): self.iflist = iflist[:] self.oflist = oflist[:] self.refene = refene[:] result = {} optimize(iflist, oflist) optene = energy(oflist) difene = map(operator.sub, refene, optene) result['Energy RMS'] = tools.STDD(optene, refene) result['Energy UME'] = tools.MADMD(optene, refene) result['Energy MAX'] = (max(difene) - min(difene))/2.0 disres = cmpmol.batchcmpmolfile(iflist, oflist) result['Displacement RMS'] = disres.disRMS() result['Displacement Mean'] = disres.dismean() result['Displacement MAX'] = disres.dismax() self.optene = optene self.disres = disres self.result = result return result def applyprm(param, ifname = 'oplsaa-temp.prm', ofname = 'oplsaa-exp.prm'): ifile = file(ifname) ofile = file(ofname, 'w+') ofile.writelines(ifile.readlines()) ifile.close() ofile.write(str(param)) ofile.close() def getfilelist(ifname = 'reference.csv'): ifile = file(ifname) lines = ifile.readlines() ifile.close() words = [x.split() for x in lines] iflist = [x[0] for x in words] oflist = [x[1] for x in words] reference = [float(x[2]) for x in words] 
return iflist, oflist, reference def test2(): iflist, refene = getfilelist() oflist = [x[:-4] + 'o' + x[-4:] for x in iflist] print chkdeform(iflist, oflist) def test(): from itcc.Tinker import parameter params = ((1,21,30,1,4.669,5.124,0.0), (1,1,30,21,-1.220,-0.126,0.422), (21,1,1,30,0.845,-0.962,0.713), (1,1,21,30,0.0,0.0,-0.553)) objparams = parameter.Parameters() for x in params: objparams.append(parameter.Torsionparameter(*x)) print merit(objparams) if __name__ == '__main__': test()
lidaobing/itcc
itcc/torsionfit/merit.py
Python
gpl-3.0
3,096
[ "TINKER" ]
fe5ed7461cc39958851e926ea523e7ca6b3a0a78840a1d6020a47bb3a824d712
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2008 Brian G. Matherly # Copyright (C) 2008 Jerome Rapinat # Copyright (C) 2008 Benny Malengier # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # gen.filters.rules/Place/_HasGallery.py # $Id$ # #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ....ggettext import gettext as _ #------------------------------------------------------------------------- # # GRAMPS modules # #------------------------------------------------------------------------- from .._hasgallerybase import HasGalleryBase #------------------------------------------------------------------------- # "Places who have media object reference" #------------------------------------------------------------------------- class HasGallery(HasGalleryBase): """Rule that checks for place who has media object reference""" name = _('Places with <count> media') description = _("Matches places with a certain number of items in the gallery")
arunkgupta/gramps
gramps/gen/filters/rules/place/_hasgallery.py
Python
gpl-2.0
1,787
[ "Brian" ]
334803477fad4e1c37a797a94893b1a3011ce60b8cfc9ef286146af13ce2f1bc
""" This migration script adds the tables necessary to support tagging of histories, datasets, and history-dataset associations (user views of datasets). If using mysql, this script will display the following error, which is corrected in the next migration script: history_dataset_association_tag_association table failed: (OperationalError) (1059, "Identifier name 'ix_history_dataset_association_tag_association_history_dataset_association_id' is too long) """ from sqlalchemy import * from migrate import * import datetime now = datetime.datetime.utcnow # Need our custom types, but don't import anything else from model from galaxy.model.custom_types import * import logging log = logging.getLogger( __name__ ) metadata = MetaData( migrate_engine ) def display_migration_details(): print "" print "This migration script adds the tables necessary to support tagging of histories," print "datasets, and history-dataset associations (user views of datasets)." print "" print "If using mysql, this script will display the following error, which is " print "corrected in the next migration script:" print "history_dataset_association_tag_association table failed: " print "(OperationalError) (1059, 'Identifier name " print "'ix_history_dataset_association_tag_association_history_dataset_association_id'" print "is too long)" # New tables to support tagging of histories, datasets, and history-dataset associations. 
# Master tag table: a hierarchy of tag names (parent_id is a self-reference).
Tag_table = Table( "tag", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "type", Integer ),
    Column( "parent_id", Integer, ForeignKey( "tag.id" ) ),
    Column( "name", TrimmedString(255) ),
    UniqueConstraint( "name" ) )

# Association tables below all share the same shape: a FK to the tagged
# object, a FK to the tag, plus the user-visible tag name/value pair
# (user_tname/user_value) and the canonical value.
HistoryTagAssociation_table = Table( "history_tag_association", metadata,
    Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
    Column( "tag_id", Integer, ForeignKey( "tag.id" ), index=True ),
    Column( "user_tname", TrimmedString(255), index=True),
    Column( "value", TrimmedString(255), index=True),
    Column( "user_value", TrimmedString(255), index=True) )

DatasetTagAssociation_table = Table( "dataset_tag_association", metadata,
    Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ),
    Column( "tag_id", Integer, ForeignKey( "tag.id" ), index=True ),
    Column( "user_tname", TrimmedString(255), index=True),
    Column( "value", TrimmedString(255), index=True),
    Column( "user_value", TrimmedString(255), index=True) )

# NOTE: on MySQL the auto-generated index name for this table exceeds the
# 64-character identifier limit (see module docstring); fixed in the next
# migration.
HistoryDatasetAssociationTagAssociation_table = Table( "history_dataset_association_tag_association", metadata,
    Column( "history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
    Column( "tag_id", Integer, ForeignKey( "tag.id" ), index=True ),
    Column( "user_tname", TrimmedString(255), index=True),
    Column( "value", TrimmedString(255), index=True),
    Column( "user_value", TrimmedString(255), index=True) )

def upgrade():
    # Create the four tagging tables.  Each create is wrapped separately so
    # that one failure (e.g. the MySQL identifier-length error) does not
    # abort the rest of the migration; failures are printed and logged.
    display_migration_details()
    metadata.reflect()
    try:
        Tag_table.create()
    except Exception, e:
        print str(e)
        log.debug( "Creating tag table failed: %s" % str( e ) )
    try:
        HistoryTagAssociation_table.create()
    except Exception, e:
        print str(e)
        log.debug( "Creating history_tag_association table failed: %s" % str( e ) )
    try:
        DatasetTagAssociation_table.create()
    except Exception, e:
        print str(e)
        log.debug( "Creating dataset_tag_association table failed: %s" % str( e ) )
    try:
        HistoryDatasetAssociationTagAssociation_table.create()
    except Exception, e:
        print str(e)
        log.debug( "Creating history_dataset_association_tag_association table failed: %s" % str( e ) )

def downgrade():
    # Reverse of upgrade(): drop each tagging table, again tolerating and
    # logging individual failures.
    metadata.reflect()
    try:
        Tag_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping tag table failed: %s" % str( e ) )
    try:
        HistoryTagAssociation_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping history_tag_association table failed: %s" % str( e ) )
    try:
        DatasetTagAssociation_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping dataset_tag_association table failed: %s" % str( e ) )
    try:
        HistoryDatasetAssociationTagAssociation_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping history_dataset_association_tag_association table failed: %s" % str( e ) )
volpino/Yeps-EURAC
lib/galaxy/model/migrate/versions/0015_tagging.py
Python
mit
4,565
[ "Galaxy" ]
d8edc73f1fc66d65b3802edd49dde2e85cde40608ea93e9af680c6cedf4ec72e
'''
-------------------------------------------------------------------------------
This script is an example of making changes on a CUCME/CUBE box, which is just
a router with special options enabled.
-------------------------------------------------------------------------------
'''
#Global imports
from netmiko import ConnectHandler
from datetime import datetime
import csv, os.path
#Local imports
import credentials

# Begin timing the script
start_time = datetime.now()

# Define the primary function (to be moved to a separate module some day...)
def nc(username, password, secret, customercsv, i):
    """Connect to every device listed in `customercsv` and disable the
    call-waiting beep under the config section named by `i` (one ephone
    line read from the customer's ephone file).

    NOTE(review): `i` keeps its trailing newline from readlines(), so it
    is sent into the config set, the `show run` search, and the syslog
    message as-is -- confirm the devices tolerate this.
    """
    with open(customercsv, mode='r') as csvfile:
        reader = csv.DictReader(csvfile)
        # Now iterate through every row in the CSVfile and make dictionaries
        for row in reader:
            hostname = row['SysName']   # read but currently unused below
            device_type = row['device_type']
            ip = row['IP_Address']
            # Connection parameters in the shape netmiko expects.
            switch = {
                'device_type': device_type,
                'ip': ip,
                'username': username,
                'password': password,
                'secret': secret,
                'verbose': False,
            }
            # This is your connection handler for commands from here on out
            net_connect = ConnectHandler(**switch)
            # Now make it pretty
            print("\n\n>>>>>>>>> Device {0} <<<<<<<<<\n".format(row['SysName']))
            # Enable mode
            net_connect.enable()
            # Here's the commands to be run in config mode: enter the
            # section `i`, apply the change, and leave.
            fix_call_waiting = [i, 'no call-waiting beep', 'exit']
            # Send the fix
            net_connect.send_config_set(fix_call_waiting)
            # Make sure it's applied
            verify_config = "show run | beg {0}".format(i)
            con_ret = net_connect.send_command(verify_config)
            # Formatted to match all text before the first '!', due to sh run sucking hard
            print(con_ret.partition("!")[0])
            # Now let everyone else know we're doing things (ELK is working, right?)
            sendlog = str('send log 0 "Added no call-waiting beep to {}"').format(i)
            net_connect.send_command(sendlog)
            print("\n>>>>>>>>> End <<<<<<<<<")
            # Disconnect from this session
            net_connect.disconnect()

# Grab the Customer name to search; derives both the device CSV and the
# ephone list filename from it.
customer = input('Customer name: ')
customercsv = str(customer) + ".csv"
ephone = str(customer) + "-ephone.txt"
# Flesh out these variables using the credentials.cred_csv module
username, password, secret = credentials.cred_csv()
# Run the primary function in this program.
# NOTE(review): nc() is called once per ephone line, and each call opens a
# fresh SSH session to every device in the CSV -- O(devices * ephones)
# connections; batching per device would be far faster, but the per-call
# behavior is preserved here.
with open(ephone, 'r') as inputfile:
    data=inputfile.readlines()
    for i in data:
        nc(username, password, secret, customercsv, i)
# Stop time!
end_time = datetime.now()
# How long did it run?
total_time = end_time - start_time
print("\nTotal time for script: \n" + str(total_time))
admiralspark/NetSpark-Scripts
Example_Scripts/Cisco/ephoneChange.py
Python
gpl-3.0
2,977
[ "Elk" ]
6971f9472384c19271b03446be11f1497ec71a1000a47b92b578314ab239bc42
from typing import Optional ################################################################ # Zulip Server settings. # # This file controls settings that affect the whole Zulip server. # See our documentation at: # https://zulip.readthedocs.io/en/latest/production/settings.html # # For developer documentation on the Zulip settings system, see: # https://zulip.readthedocs.io/en/latest/subsystems/settings.html # # Remember to restart the server after making changes here! # su zulip -c /home/zulip/deployments/current/scripts/restart-server ################################ # Mandatory settings. # # These settings MUST be set in production. In a development environment, # sensible default values will be used. # The user-accessible Zulip hostname for this installation, e.g. # zulip.example.com. This should match what users will put in their # web browser. If you want to allow multiple hostnames, add the rest # to ALLOWED_HOSTS. # # If you need to access the server on a specific port, you should set # EXTERNAL_HOST to e.g. zulip.example.com:1234 here. EXTERNAL_HOST = 'zulip.example.com' # The email address for the person or team who maintains the Zulip # installation. Note that this is a public-facing email address; it may # appear on 404 pages, is used as the sender's address for many automated # emails, and is advertised as a support address. An email address like # support@example.com is totally reasonable, as is admin@example.com. # Do not put a display name; e.g. 'support@example.com', not # 'Zulip Support <support@example.com>'. ZULIP_ADMINISTRATOR = 'zulip-admin@example.com' ################ # Outgoing email (SMTP) settings. # # Zulip needs to be able to send email (that is, use SMTP) so it can # confirm new users' email addresses and send notifications. # # If you don't already have an SMTP provider, free ones are available. 
# # For more details, including a list of free SMTP providers and # advice for troubleshooting, see the Zulip documentation: # https://zulip.readthedocs.io/en/latest/production/email.html # EMAIL_HOST and EMAIL_HOST_USER are generally required. #EMAIL_HOST = 'smtp.example.com' #EMAIL_HOST_USER = '' # Passwords and secrets are not stored in this file. The password # for user EMAIL_HOST_USER goes in `/etc/zulip/zulip-secrets.conf`. # In that file, set `email_password`. For example: # email_password = abcd1234 # EMAIL_USE_TLS and EMAIL_PORT are required for most SMTP providers. #EMAIL_USE_TLS = True #EMAIL_PORT = 587 ################################ # Optional settings. # The noreply address to be used as the sender for certain generated # emails. Messages sent to this address could contain sensitive user # data and should not be delivered anywhere. The default is # e.g. noreply-{random_token}@zulip.example.com (if EXTERNAL_HOST is # zulip.example.com). There are potential security issues if you set # ADD_TOKENS_TO_NOREPLY_ADDRESS=False to remove the token; see # https://zulip.readthedocs.io/en/latest/production/email.html for details. #ADD_TOKENS_TO_NOREPLY_ADDRESS = True #TOKENIZED_NOREPLY_EMAIL_ADDRESS = "noreply-{token}@example.com" # Used for noreply emails only if ADD_TOKENS_TO_NOREPLY_ADDRESS=False #NOREPLY_EMAIL_ADDRESS = 'noreply@example.com' # Many countries and bulk mailers require certain types of email to display # a physical mailing address to comply with anti-spam legislation. # Non-commercial and non-public-facing installations are unlikely to need # this setting. # The address should have no newlines. #PHYSICAL_ADDRESS = '' # A comma-separated list of strings representing the host/domain names # that your users can enter in their browsers to access Zulip. 
# This is a security measure; for details, see the Django documentation: # https://docs.djangoproject.com/en/1.11/ref/settings/#allowed-hosts # # Zulip automatically adds to this list 'localhost', '127.0.0.1', and # patterns representing EXTERNAL_HOST and subdomains of it. If you are # accessing your server by other hostnames, list them here. # # Note that these should just be hostnames, without port numbers. #ALLOWED_HOSTS = ['zulip-alias.example.com', '192.0.2.1'] ################ # Authentication settings. # Enable at least one of the following authentication backends. # See https://zulip.readthedocs.io/en/latest/production/authentication-methods.html # for documentation on our authentication backends. # # The install process requires EmailAuthBackend (the default) to be # enabled. If you want to disable it, do so after creating the # initial realm and user. AUTHENTICATION_BACKENDS = ( 'zproject.backends.EmailAuthBackend', # Email and password; just requires SMTP setup # 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below # 'zproject.backends.GitHubAuthBackend', # GitHub auth, setup below # 'zproject.backends.AzureADAuthBackend', # Microsoft Azure Active Directory auth, setup below # 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below # 'zproject.backends.ZulipRemoteUserBackend', # Local SSO, setup docs on readthedocs ) ######## # Google OAuth. # # To set up Google authentication, you'll need to do the following: # # (1) Visit https://console.developers.google.com/ , navigate to # "APIs & Services" > "Credentials", and create a "Project" which will # correspond to your Zulip instance. # # (2) Navigate to "APIs & services" > "Library", and find the # "Google+ API". Choose "Enable". # # (3) Return to "Credentials", and select "Create credentials". # Choose "OAuth client ID", and follow prompts to create a consent # screen. 
Fill in "Authorized redirect URIs" with a value like # https://zulip.example.com/accounts/login/google/done/ # based on your value for EXTERNAL_HOST. # # (4) You should get a client ID and a client secret. Copy them. # Use the client ID as `GOOGLE_OAUTH2_CLIENT_ID` here, and put the # client secret in zulip-secrets.conf as `google_oauth2_client_secret`. #GOOGLE_OAUTH2_CLIENT_ID = <your client ID from Google> ######## # GitHub OAuth. # # To set up GitHub authentication, you'll need to do the following: # # (1) Register an OAuth2 application with GitHub at one of: # https://github.com/settings/developers # https://github.com/organizations/ORGNAME/settings/developers # Fill in "Callback URL" with a value like # https://zulip.example.com/complete/github/ as # based on your values for EXTERNAL_HOST and SOCIAL_AUTH_SUBDOMAIN. # # (2) You should get a page with settings for your new application, # showing a client ID and a client secret. Use the client ID as # `SOCIAL_AUTH_GITHUB_KEY` here, and put the client secret in # zulip-secrets.conf as `social_auth_github_secret`. #SOCIAL_AUTH_GITHUB_KEY = <your client ID from GitHub> # (3) Optionally, you can configure the GitHub integration to only # allow members of a particular GitHub team or organization to log # into your Zulip server through GitHub authentication. To enable # this, set one of the two parameters below: #SOCIAL_AUTH_GITHUB_TEAM_ID = <your team id> #SOCIAL_AUTH_GITHUB_ORG_NAME = <your org name> # (4) If you are serving multiple Zulip organizations on different # subdomains, you need to set SOCIAL_AUTH_SUBDOMAIN. You can set it # to any subdomain on which you do not plan to host a Zulip # organization. 
The default recommendation, `auth`, is a reserved # subdomain; if you're using this setting, the "Callback URL" should be e.g.: # https://auth.zulip.example.com/complete/github/ # # If you end up using a subdomain other then the default # recommendation, you must also set the 'ROOT_SUBDOMAIN_ALIASES' list # to include this subdomain. # #SOCIAL_AUTH_SUBDOMAIN = 'auth' ######## # Azure Active Directory OAuth. # # To set up Microsoft Azure AD authentication, you'll need to do the following: # # (1) Register an OAuth2 application with Microsoft at: # https://apps.dev.microsoft.com # Generate a new password under Application Secrets # Generate a new platform (web) under Platforms. For Redirect URL, enter: # https://zulip.example.com/complete/azuread-oauth2/ # Add User.Read permission under Microsoft Graph Permissions # # (2) Enter the application ID for the app as SOCIAL_AUTH_AZUREAD_OAUTH2_KEY here # (3) Put the application password in zulip-secrets.conf as 'azure_oauth2_secret'. #SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = '' ######## # SSO via REMOTE_USER. # # If you are using the ZulipRemoteUserBackend authentication backend, # set this to your domain (e.g. if REMOTE_USER is "username" and the # corresponding email address is "username@example.com", set # SSO_APPEND_DOMAIN = "example.com") SSO_APPEND_DOMAIN = None # type: Optional[str] ################ # Miscellaneous settings. # Support for mobile push notifications. Setting controls whether # push notifications will be forwarded through a Zulip push # notification bouncer server to the mobile apps. See # https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html # for information on how to sign up for and configure this. #PUSH_NOTIFICATION_BOUNCER_URL = 'https://push.zulipchat.com' # Whether to redact the content of push notifications. This is less # usable, but avoids sending message content over the wire. 
In the # future, we're likely to replace this with an end-to-end push # notification encryption feature. #PUSH_NOTIFICATION_REDACT_CONTENT = False # Controls whether session cookies expire when the browser closes SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Session cookie expiry in seconds after the last page load SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks # Password strength requirements; learn about configuration at # https://zulip.readthedocs.io/en/latest/production/security-model.html. # PASSWORD_MIN_LENGTH = 6 # PASSWORD_MIN_GUESSES = 10000 # Controls whether Zulip sends "new login" email notifications. #SEND_LOGIN_EMAILS = True # Controls whether or not there is a feedback button in the UI. ENABLE_FEEDBACK = False # Feedback sent by your users will be sent to this email address. FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR # Controls whether or not error reports (tracebacks) are emailed to the # server administrators. #ERROR_REPORTING = True # For frontend (JavaScript) tracebacks #BROWSER_ERROR_REPORTING = False # If True, each log message in the server logs will identify the # Python module where it came from. Useful for tracking down a # mysterious log message, but a little verbose. #LOGGING_SHOW_MODULE = False # If True, each log message in the server logs will identify the # process ID. Useful for correlating logs with information from # system-level monitoring tools. #LOGGING_SHOW_PID = False # Controls whether or not Zulip will provide inline image preview when # a link to an image is referenced in a message. Note: this feature # can also be disabled in a realm's organization settings. #INLINE_IMAGE_PREVIEW = True # Controls whether or not Zulip will provide inline previews of # websites that are referenced in links in messages. Note: this feature # can also be disabled in a realm's organization settings. #INLINE_URL_EMBED_PREVIEW = False # Controls whether or not Zulip will parse links starting with # "file:///" as a hyperlink (useful if you have e.g. 
an NFS share). ENABLE_FILE_LINKS = False # By default, files uploaded by users and user avatars are stored # directly on the Zulip server. You can configure files being instead # stored in Amazon S3 or another scalable data store here. See docs at: # # https://zulip.readthedocs.io/en/latest/production/upload-backends.html LOCAL_UPLOADS_DIR = "/home/zulip/uploads" #S3_AUTH_UPLOADS_BUCKET = "" #S3_AVATAR_BUCKET = "" #S3_REGION = "" # Maximum allowed size of uploaded files, in megabytes. DO NOT SET # ABOVE 80MB. The file upload implementation doesn't support chunked # uploads, so browsers will crash if you try uploading larger files. MAX_FILE_UPLOAD_SIZE = 25 # Controls whether name changes are completely disabled for this installation # This is useful in settings where you're syncing names from an integrated LDAP/Active Directory NAME_CHANGES_DISABLED = False # Controls whether users who have not uploaded an avatar will receive an avatar # from gravatar.com. ENABLE_GRAVATAR = True # To override the default avatar image if ENABLE_GRAVATAR is False, place your # custom default avatar image at /home/zulip/local-static/default-avatar.png # and uncomment the following line. #DEFAULT_AVATAR_URI = '/local-static/default-avatar.png' # To access an external postgres database you should define the host name in # REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the # property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE # Valid values for REMOTE_POSTGRES_SSLMODE are documented in the # "SSL Mode Descriptions" table in # https://www.postgresql.org/docs/9.5/static/libpq-ssl.html #REMOTE_POSTGRES_HOST = 'dbserver.example.com' #REMOTE_POSTGRES_SSLMODE = 'require' # If you want to set a Terms of Service for your server, set the path # to your markdown file, and uncomment the following line. #TERMS_OF_SERVICE = '/etc/zulip/terms.md' # Similarly if you want to set a Privacy Policy. 
#PRIVACY_POLICY = '/etc/zulip/privacy.md' ################ # Twitter integration. # Zulip supports showing inline Tweet previews when a tweet is linked # to in a message. To support this, Zulip must have access to the # Twitter API via OAuth. To obtain the various access tokens needed # below, you must register a new application under your Twitter # account by doing the following: # # 1. Log in to http://dev.twitter.com. # 2. In the menu under your username, click My Applications. From this page, create a new application. # 3. Click on the application you created and click "create my access token". # 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key, # and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf. ################ # Email gateway integration. # # The Email gateway integration supports sending messages into Zulip # by sending an email. This is useful for receiving notifications # from third-party services that only send outgoing notifications via # email. Once this integration is configured, each stream will have # an email address documented on the stream settings page and emails # sent to that address will be delivered into the stream. # # There are two ways to configure email mirroring in Zulip: # 1. Local delivery: A MTA runs locally and passes mail directly to Zulip # 2. Polling: Checks an IMAP inbox every minute for new messages. # # The local delivery configuration is preferred for production because # it supports nicer looking email addresses and has no cron delay, # while the polling mechanism is better for testing/developing this # feature because it doesn't require a public-facing IP/DNS setup. # # The main email mirror setting is the email address pattern, where # you specify the email address format you'd like the integration to # use. 
It should be one of the following: # %s@zulip.example.com (for local delivery) # username+%s@example.com (for polling if EMAIL_GATEWAY_LOGIN=username@example.com) EMAIL_GATEWAY_PATTERN = "" # # If you are using local delivery, EMAIL_GATEWAY_PATTERN is all you need # to change in this file. You will also need to enable the Zulip postfix # configuration to support local delivery by adding # , zulip::postfix_localmail # to puppet_classes in /etc/zulip/zulip.conf and then running # `scripts/zulip-puppet-apply -f` to do the installation. # # You will also need to setup DNS MX records to ensure emails sent to # the hostname configured in EMAIL_GATEWAY_PATTERN will be delivered # to the Zulip postfix server you installed above. # # If you are using polling, you will need to setup an IMAP email # account dedicated to Zulip email gateway messages. The model is # that users will send emails to that account via an address of the # form username+%s@example.com (which is what you will set as # EMAIL_GATEWAY_PATTERN); your email provider should deliver those # emails to the username@example.com inbox. Then you run in a cron # job `./manage.py email_mirror` (see puppet/zulip/files/cron.d/email-mirror), # which will check that inbox and batch-process any new messages. # # You will need to configure authentication for the email mirror # command to access the IMAP mailbox below and in zulip-secrets.conf. # # The IMAP login; username here and password as email_gateway_password in # zulip-secrets.conf. EMAIL_GATEWAY_LOGIN = "" # The IMAP server & port to connect to EMAIL_GATEWAY_IMAP_SERVER = "" EMAIL_GATEWAY_IMAP_PORT = 993 # The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above # must be delivered to this folder EMAIL_GATEWAY_IMAP_FOLDER = "INBOX" ################ # LDAP integration. # # Zulip supports retrieving information about users via LDAP, and # optionally using LDAP as an authentication mechanism. 
import ldap from django_auth_ldap.config import LDAPSearch, GroupOfNamesType, LDAPSearchUnion ######## # LDAP integration, part 1: Connecting to the LDAP server. # # For detailed instructions, see the Zulip documentation: # https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap # The LDAP server to connect to. Setting this enables Zulip # automatically fetching each new user's name from LDAP. # Example: "ldaps://ldap.example.com" AUTH_LDAP_SERVER_URI = "" # The DN of the user to bind as (i.e., authenticate as) in order to # query LDAP. If unset, Zulip does an anonymous bind. AUTH_LDAP_BIND_DN = "" # Passwords and secrets are not stored in this file. The password # corresponding to AUTH_LDAP_BIND_DN goes in `/etc/zulip/zulip-secrets.conf`. # In that file, set `auth_ldap_bind_password`. For example: # auth_ldap_bind_password = abcd1234 ######## # LDAP integration, part 2: Mapping user info from LDAP to Zulip. # # For detailed instructions, see the Zulip documentation: # https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap # The LDAP search query to find a given user. # # The arguments to `LDAPSearch` are (base DN, scope, filter). In the # filter, the string `%(user)s` is a Python placeholder. The Zulip # server will replace this with the user's Zulip username, i.e. the # name they type into the Zulip login form. # # For more details and alternatives, see the documentation linked above. AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(uid=%(user)s)") # Domain to combine with a user's username to figure out their email address. # # If users log in as e.g. "sam" when their email address is "sam@example.com", # set this to "example.com". If users log in with their full email addresses, # leave as None; if the username -> email address mapping isn't so simple, # leave as None and see LDAP_EMAIL_ATTR. 
LDAP_APPEND_DOMAIN = None # type: Optional[str] # LDAP attribute to find a user's email address. # # Leave as None if users log in with their email addresses, # or if using LDAP_APPEND_DOMAIN. LDAP_EMAIL_ATTR = None # type: Optional[str] # This map defines how to populate attributes of a Zulip user from LDAP. # # The format is `zulip_name: ldap_name`; each entry maps a Zulip # concept (on the left) to the LDAP attribute name (on the right) your # LDAP database uses for the same concept. AUTH_LDAP_USER_ATTR_MAP = { # full_name is required; common values include "cn" or "displayName". "full_name": "cn", # User avatars can be pulled from the LDAP "thumbnailPhoto"/"jpegPhoto" field. # "avatar": "thumbnailPhoto", # This line is for having Zulip to automatically deactivate users # who are disabled in LDAP/Active Directory (and reactivate users who are not). # See docs for usage details and precise semantics. # "userAccountControl": "userAccountControl", } ################ # Miscellaneous settings. # The default CAMO_URI of '/external_content/' is served by the camo # setup in the default Voyager nginx configuration. Setting CAMO_URI # to '' will disable the Camo integration. CAMO_URI = '/external_content/' # RabbitMQ configuration # # By default, Zulip connects to rabbitmq running locally on the machine, # but Zulip also supports connecting to RabbitMQ over the network; # to use a remote RabbitMQ instance, set RABBITMQ_HOST here. # RABBITMQ_HOST = "localhost" # To use another rabbitmq user than the default 'zulip', set RABBITMQ_USERNAME here. # RABBITMQ_USERNAME = 'zulip' # Memcached configuration # # By default, Zulip connects to memcached running locally on the machine, # but Zulip also supports connecting to memcached over the network; # to use a remote Memcached instance, set MEMCACHED_LOCATION here. 
# Format HOST:PORT # MEMCACHED_LOCATION = 127.0.0.1:11211 # Redis configuration # # By default, Zulip connects to redis running locally on the machine, # but Zulip also supports connecting to redis over the network; # to use a remote Redis instance, set REDIS_HOST here. # REDIS_HOST = '127.0.0.1' # For a different redis port set the REDIS_PORT here. # REDIS_PORT = 6379 # If you set redis_password in zulip-secrets.conf, Zulip will use that password # to connect to the redis server. # Controls whether Zulip will rate-limit user requests. # RATE_LIMITING = True # By default, Zulip connects to the thumbor (the thumbnailing software # we use) service running locally on the machine. If you're running # thumbor on a different server, you can configure that by setting # THUMBOR_URL here. Setting THUMBOR_URL='' will let Zulip server know that # thumbor is not running or configured. #THUMBOR_URL = 'http://127.0.0.1:9995' # # This setting controls whether images shown in Zulip's inline image # previews should be thumbnailed by thumbor, which saves bandwidth but # can modify the image's appearance. #THUMBNAIL_IMAGES = True # Controls the Jitsi video call integration. By default, the # integration uses the SaaS meet.jit.si server. You can specify # your own Jitsi Meet server, or if you'd like to disable the # integration, set JITSI_SERVER_URL = None. #JITSI_SERVER_URL = 'jitsi.example.com'
dhcrzf/zulip
zproject/prod_settings_template.py
Python
apache-2.0
22,300
[ "VisIt" ]
7710154914accace1c18b74963507d0f4255849fa5f72a8644cafff9ab7a4f55
""" A collection of functions to find the weights and abscissas for Gaussian Quadrature. These calculations are done by finding the eigenvalues of a tridiagonal matrix whose entries are dependent on the coefficients in the recursion formula for the orthogonal polynomials with the corresponding weighting function over the interval. Many recursion relations for orthogonal polynomials are given: .. math:: a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x) The recursion relation of interest is .. math:: P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x) where :math:`P` has a different normalization than :math:`f`. The coefficients can be found as: .. math:: A_n = -a2n / a3n \\qquad B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2 where .. math:: h_n = \\int_a^b w(x) f_n(x)^2 assume: .. math:: P_0 (x) = 1 \\qquad P_{-1} (x) == 0 For the mathematical background, see [golub.welsch-1969-mathcomp]_ and [abramowitz.stegun-1965]_. References ---------- .. [golub.welsch-1969-mathcomp] Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10. .. [abramowitz.stegun-1965] Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of Mathematical Functions: with Formulas, Graphs, and Mathematical Tables*. Gaithersburg, MD: National Bureau of Standards. http://www.math.sfu.ca/~cbm/aands/ .. [townsend.trogdon.olver-2014] Townsend, A. and Trogdon, T. and Olver, S. (2014) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. :arXiv:`1410.5286`. .. [townsend.trogdon.olver-2015] Townsend, A. and Trogdon, T. and Olver, S. (2015) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. IMA Journal of Numerical Analysis :doi:`10.1093/imanum/drv002`. """ # # Author: Travis Oliphant 2000 # Updated Sep. 2003 (fixed bugs --- tested to be accurate) # SciPy imports. 
import numpy as np
# NOTE(review): `int` imported from numpy here shadows the builtin `int`
# for the rest of this module — presumably intentional (numpy re-exports
# the builtin), but verify before adding code that relies on builtin int.
from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,
                   hstack, arccos, arange)
from scipy import linalg
from scipy.special import airy

# Local imports.
from . import _ufuncs
from . import _ufuncs as cephes
# Shorthand for the gamma function, used throughout in the h_n / k_n
# normalization formulas.
_gam = cephes.gamma
from . import specfun

# Names of the classic orthogonal-polynomial constructors exported below.
_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc',
             'chebys', 'jacobi', 'laguerre', 'genlaguerre',
             'hermite', 'hermitenorm', 'gegenbauer',
             'sh_legendre', 'sh_chebyt', 'sh_chebyu', 'sh_jacobi']

# Correspondence between new and old names of root functions
_rootfuns_map = {'roots_legendre': 'p_roots',
                 'roots_chebyt': 't_roots',
                 'roots_chebyu': 'u_roots',
                 'roots_chebyc': 'c_roots',
                 'roots_chebys': 's_roots',
                 'roots_jacobi': 'j_roots',
                 'roots_laguerre': 'l_roots',
                 'roots_genlaguerre': 'la_roots',
                 'roots_hermite': 'h_roots',
                 'roots_hermitenorm': 'he_roots',
                 'roots_gegenbauer': 'cg_roots',
                 'roots_sh_legendre': 'ps_roots',
                 'roots_sh_chebyt': 'ts_roots',
                 'roots_sh_chebyu': 'us_roots',
                 'roots_sh_jacobi': 'js_roots'}

_evalfuns = ['eval_legendre', 'eval_chebyt', 'eval_chebyu',
             'eval_chebyc', 'eval_chebys', 'eval_jacobi',
             'eval_laguerre', 'eval_genlaguerre', 'eval_hermite',
             'eval_hermitenorm', 'eval_gegenbauer',
             'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu',
             'eval_sh_jacobi']

__all__ = _polyfuns + list(_rootfuns_map.keys()) + _evalfuns + ['poch', 'binom']


class orthopoly1d(np.poly1d):
    """A `numpy.poly1d` subclass representing an orthogonal polynomial.

    In addition to the polynomial coefficients it stores the quadrature
    nodes/weights (`weights`), the weight function (`weight_func`), the
    orthogonality interval (`limits`) and a normalization coefficient
    (`normcoef`).
    """

    def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
                 limits=None, monic=False, eval_func=None):
        # "Equivalent" weights: quadrature weights divided by the weight
        # function evaluated at each root.
        equiv_weights = [weights[k] / wfunc(roots[k]) for
                         k in range(len(roots))]
        mu = sqrt(hn)
        if monic:
            # Rescale so the leading coefficient becomes 1; the stored
            # eval_func and normalization are scaled to match.
            evf = eval_func
            if evf:
                knn = kn
                eval_func = lambda x: evf(x) / knn
            mu = mu / abs(kn)
            kn = 1.0

        # compute coefficients from roots, then scale
        poly = np.poly1d(roots, r=True)
        np.poly1d.__init__(self, poly.coeffs * float(kn))

        self.weights = np.array(list(zip(roots, weights, equiv_weights)))
        self.weight_func = wfunc
        self.limits = limits
        self.normcoef = mu

        # Note: eval_func will be discarded on arithmetic
        self._eval_func = eval_func

    def __call__(self, v):
        # Prefer the exact evaluation routine when available; fall back
        # to plain polynomial evaluation (e.g. for poly1d arguments).
        if self._eval_func and not isinstance(v, np.poly1d):
            return self._eval_func(v)
        else:
            return np.poly1d.__call__(self, v)

    def _scale(self, p):
        # Multiply the polynomial (and its eval_func / normalization)
        # by the scalar p in place.  p == 1 is a no-op.
        if p == 1.0:
            return
        self._coeffs *= p

        evf = self._eval_func
        if evf:
            self._eval_func = lambda x: evf(x) * p
        self.normcoef *= p


def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)

    Returns the roots (x) of an nth order orthogonal polynomial,
    and weights (w) to use in appropriate Gaussian quadrature with that
    orthogonal polynomial.

    The polynomials have the recurrence relation
        P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)

    an_func(n)          should return A_n
    sqrt_bn_func(n)     should return sqrt(B_n)
    mu ( = h_0 )        is the integral of the weight over the orthogonal
                        interval
    """
    # Golub-Welsch: eigenvalues of the symmetric tridiagonal Jacobi
    # matrix (stored in banded form) are the quadrature nodes.
    k = np.arange(n, dtype='d')
    c = np.zeros((2, n))
    c[0,1:] = bn_func(k[1:])
    c[1,:] = an_func(k)
    x = linalg.eigvals_banded(c, overwrite_a_band=True)

    # improve roots by one application of Newton's method
    y = f(n, x)
    dy = df(n, x)
    x -= y/dy

    fm = f(n-1, x)
    fm /= np.abs(fm).max()
    dy /= np.abs(dy).max()
    w = 1.0 / (fm * dy)

    # Symmetrize for even weight functions: average the weights and
    # antisymmetrize the nodes about the origin.
    if symmetrize:
        w = (w + w[::-1]) / 2
        x = (x - x[::-1]) / 2

    # Normalize weights so they sum to mu0 = integral of the weight.
    w *= mu0 / w.sum()

    if mu:
        return x, w, mu0
    else:
        return x, w

# Jacobi Polynomials 1               P^(alpha,beta)_n(x)


def roots_jacobi(n, alpha, beta, mu=False):
    r"""Gauss-Jacobi quadrature.

    Compute the sample points and weights for Gauss-Jacobi quadrature. The
    sample points are the roots of the nth degree Jacobi polynomial,
    :math:`P^{\alpha, \beta}_n(x)`. These sample points and weights
    correctly integrate polynomials of degree :math:`2n - 1` or less over
    the interval :math:`[-1, 1]` with weight function
    :math:`w(x) = (1 - x)^{\alpha} (1 + x)^{\beta}`. See 22.2.1 in [AS]_
    for details.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    beta : float
        beta must be > -1
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha <= -1 or beta <= -1:
        raise ValueError("alpha and beta must be greater than -1.")

    # Degenerate parameter choices reduce to simpler rules.
    if alpha == 0.0 and beta == 0.0:
        return roots_legendre(m, mu)
    if alpha == beta:
        return roots_gegenbauer(m, alpha+0.5, mu)

    # mu0 = integral of the weight over [-1, 1].
    mu0 = 2.0**(alpha+beta+1)*cephes.beta(alpha+1, beta+1)
    a = alpha
    b = beta
    if a + b == 0.0:
        an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0)
    else:
        an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b),
                            (b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2)))

    bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \
        * np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1)))

    f = lambda n, x: cephes.eval_jacobi(n, a, b, x)
    df = lambda n, x: 0.5 * (n + a + b + 1) \
                      * cephes.eval_jacobi(n-1, a+1, b+1, x)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)


def jacobi(n, alpha, beta, monic=False):
    r"""Jacobi polynomial.

    Defined to be the solution of

    .. math::
        (1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)}
          + (\beta - \alpha - (\alpha + \beta + 2)x)
            \frac{d}{dx}P_n^{(\alpha, \beta)}
          + n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0

    for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a
    polynomial of degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    alpha : float
        Parameter, must be greater than -1.
    beta : float
        Parameter, must be greater than -1.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    P : orthopoly1d
        Jacobi polynomial.

    Notes
    -----
    For fixed :math:`\alpha, \beta`, the polynomials
    :math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]`
    with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`.

    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta
    # Degree 0: the constant polynomial 1.
    if n == 0:
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
                           eval_func=np.ones_like)
    x, w, mu = roots_jacobi(n, alpha, beta, mu=True)
    # h_n and k_n from the standard normalization (A&S 22.2.1).
    ab1 = alpha + beta + 1.0
    hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
    hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
    kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
    # here kn = coefficient on x^n term
    p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
                    lambda x: eval_jacobi(n, alpha, beta, x))
    return p

# Jacobi Polynomials shifted         G_n(p,q,x)


def roots_sh_jacobi(n, p1, q1, mu=False):
    """Gauss-Jacobi (shifted) quadrature.

    Compute the sample points and weights for Gauss-Jacobi (shifted)
    quadrature. The sample points are the roots of the nth degree shifted
    Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample points and
    weights correctly integrate polynomials of degree :math:`2n - 1` or
    less over the interval :math:`[0, 1]` with weight function
    :math:`w(x) = (1 - x)^{p-q} x^{q-1}`. See 22.2.2 in [AS]_ for
    details.

    Parameters
    ----------
    n : int
        quadrature order
    p1 : float
        (p1 - q1) must be > -1
    q1 : float
        q1 must be > 0
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    if (p1-q1) <= -1 or q1 <= 0:
        raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
    # Shift the ordinary Gauss-Jacobi rule from [-1, 1] to [0, 1] and
    # rescale the weights accordingly.
    x, w, m = roots_jacobi(n, p1-q1, q1-1, True)
    x = (x + 1) / 2
    scale = 2.0**p1
    w /= scale
    m /= scale
    if mu:
        return x, w, m
    else:
        return x, w


def sh_jacobi(n, p, q, monic=False):
    r"""Shifted Jacobi polynomial.

    Defined by

    .. math::

        G_n^{(p, q)}(x)
          = \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1),

    where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    p : float
        Parameter, must have :math:`p > q - 1`.
    q : float
        Parameter, must be greater than 0.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    G : orthopoly1d
        Shifted Jacobi polynomial.

    Notes
    -----
    For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are
    orthogonal over :math:`[0, 1]` with weight function
    :math:`(1 - x)^{p - q}x^{q - 1}`.

    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.)
    if n == 0:
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
                           eval_func=np.ones_like)
    n1 = n
    x, w, mu0 = roots_sh_jacobi(n1, p, q, mu=True)
    hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
    hn /= (2 * n + p) * (_gam(2 * n + p)**2)
    # kn = 1.0 in standard form so monic is redundant.  Kept for compatibility.
    kn = 1.0
    pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,
                     eval_func=lambda x: eval_sh_jacobi(n, p, q, x))
    return pp

# Generalized Laguerre               L^(alpha)_n(x)


def roots_genlaguerre(n, alpha, mu=False):
    r"""Gauss-generalized Laguerre quadrature.

    Compute the sample points and weights for Gauss-generalized Laguerre
    quadrature. The sample points are the roots of the nth degree
    generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`. These
    sample points and weights correctly integrate polynomials of degree
    :math:`2n - 1` or less over the interval :math:`[0, \infty]` with
    weight function :math:`w(x) = x^{\alpha} e^{-x}`. See 22.3.9 in
    [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha < -1:
        raise ValueError("alpha must be greater than -1.")

    mu0 = cephes.gamma(alpha + 1)

    # n == 1 has a closed-form node/weight; avoids the eigenvalue path.
    if m == 1:
        x = np.array([alpha+1.0], 'd')
        w = np.array([mu0], 'd')
        if mu:
            return x, w, mu0
        else:
            return x, w

    an_func = lambda k: 2 * k + alpha + 1
    bn_func = lambda k: -np.sqrt(k * (k + alpha))
    f = lambda n, x: cephes.eval_genlaguerre(n, alpha, x)
    df = lambda n, x: (n*cephes.eval_genlaguerre(n, alpha, x)
                       - (n + alpha)*cephes.eval_genlaguerre(n-1, alpha, x))/x
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)


def genlaguerre(n, alpha, monic=False):
    r"""Generalized (associated) Laguerre polynomial.

    Defined to be the solution of

    .. math::
        x\frac{d^2}{dx^2}L_n^{(\alpha)}
          + (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)}
          + nL_n^{(\alpha)} = 0,

    where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial of
    degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    alpha : float
        Parameter, must be greater than -1.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    L : orthopoly1d
        Generalized Laguerre polynomial.

    Notes
    -----
    For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}` are
    orthogonal over :math:`[0, \infty)` with weight function
    :math:`e^{-x}x^\alpha`.

    The Laguerre polynomials are the special case where :math:`\alpha =
    0`.

    See Also
    --------
    laguerre : Laguerre polynomial.

    """
    if alpha <= -1:
        raise ValueError("alpha must be > -1")
    if n < 0:
        raise ValueError("n must be nonnegative.")

    # For n == 0 the roots routine needs a positive order; compute for
    # n1 = 1 and discard the nodes below.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = roots_genlaguerre(n1, alpha, mu=True)
    wfunc = lambda x: exp(-x) * x**alpha
    if n == 0:
        x, w = [], []
    hn = _gam(n + alpha + 1) / _gam(n + 1)
    kn = (-1)**n / _gam(n + 1)
    p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,
                    lambda x: eval_genlaguerre(n, alpha, x))
    return p

# Laguerre                      L_n(x)


def roots_laguerre(n, mu=False):
    r"""Gauss-Laguerre quadrature.

    Compute the sample points and weights for Gauss-Laguerre quadrature.
    The sample points are the roots of the nth degree Laguerre polynomial,
    :math:`L_n(x)`. These sample points and weights correctly integrate
    polynomials of degree :math:`2n - 1` or less over the interval
    :math:`[0, \infty]` with weight function :math:`w(x) = e^{-x}`. See
    22.2.13 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.laguerre.laggauss

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    # Laguerre is generalized Laguerre with alpha = 0.
    return roots_genlaguerre(n, 0.0, mu=mu)


def laguerre(n, monic=False):
    r"""Laguerre polynomial.

    Defined to be the solution of

    .. math::
        x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0;

    :math:`L_n` is a polynomial of degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    L : orthopoly1d
        Laguerre Polynomial.

    Notes
    -----
    The polynomials :math:`L_n` are orthogonal over :math:`[0,
    \infty)` with weight function :math:`e^{-x}`.

    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    # Same n == 0 workaround as genlaguerre: compute a 1-point rule and
    # discard the nodes.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = roots_laguerre(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = 1.0
    kn = (-1)**n / _gam(n + 1)
    p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
                    lambda x: eval_laguerre(n, x))
    return p

# Hermite  1                         H_n(x)


def roots_hermite(n, mu=False):
    r"""Gauss-Hermite (physicist's) quadrature.

    Compute the sample points and weights for Gauss-Hermite quadrature.
    The sample points are the roots of the nth degree Hermite polynomial,
    :math:`H_n(x)`. These sample points and weights correctly integrate
    polynomials of degree :math:`2n - 1` or less over the interval
    :math:`[-\infty, \infty]` with weight function
    :math:`w(x) = e^{-x^2}`. See 22.2.14 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    Notes
    -----
    For small n up to 150 a modified version of the Golub-Welsch
    algorithm is used. Nodes are computed from the eigenvalue
    problem and improved by one step of a Newton iteration.
    The weights are computed from the well-known analytical formula.

    For n larger than 150 an optimal asymptotic algorithm is applied
    which computes nodes and weights in a numerically stable manner.
    The algorithm has linear runtime making computation for very
    large n (several thousand or more) feasible.

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.hermite.hermgauss
    roots_hermitenorm

    References
    ----------
    .. [townsend.trogdon.olver-2014]
        Townsend, A. and Trogdon, T. and Olver, S. (2014)
        *Fast computation of Gauss quadrature nodes and
        weights on the whole real line*. :arXiv:`1410.5286`.
    .. [townsend.trogdon.olver-2015]
        Townsend, A. and Trogdon, T. and Olver, S. (2015)
        *Fast computation of Gauss quadrature nodes and
        weights on the whole real line*.
        IMA Journal of Numerical Analysis
        :doi:`10.1093/imanum/drv002`.
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")

    mu0 = np.sqrt(np.pi)
    # Golub-Welsch + Newton refinement for small orders; asymptotic
    # algorithm for large ones (see Notes).
    if n <= 150:
        an_func = lambda k: 0.0*k
        bn_func = lambda k: np.sqrt(k/2.0)
        f = cephes.eval_hermite
        df = lambda n, x: 2.0 * n * cephes.eval_hermite(n-1, x)
        return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
    else:
        nodes, weights = _roots_hermite_asy(m)
        if mu:
            return nodes, weights, mu0
        else:
            return nodes, weights


def _compute_tauk(n, k, maxit=5):
    """Helper function for Tricomi initial guesses

    For details, see formula 3.1 in lemma 3.1 in the
    original paper.

    Parameters
    ----------
    n : int
        Quadrature order
    k : ndarray of type int
        Index of roots :math:`\tau_k` to compute
    maxit : int
        Number of Newton maxit performed, the default
        value of 5 is sufficient.

    Returns
    -------
    tauk : ndarray
        Roots of equation 3.1

    See Also
    --------
    initial_nodes_a
    roots_hermite_asy
    """
    a = n % 2 - 0.5
    c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0)
    # Solve x - sin(x) = c by Newton iteration, starting from pi/2.
    f = lambda x: x - sin(x) - c
    df = lambda x: 1.0 - cos(x)
    xi = 0.5*pi
    for i in range(maxit):
        xi = xi - f(xi)/df(xi)
    return xi


def _initial_nodes_a(n, k):
    r"""Tricomi initial guesses

    Computes an initial approximation to the square of the `k`-th
    (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
    of order :math:`n`. The formula is the one from lemma 3.1 in the
    original paper. The guesses are accurate except in the region
    near :math:`\sqrt{2n + 1}`.

    Parameters
    ----------
    n : int
        Quadrature order
    k : ndarray of type int
        Index of roots to compute

    Returns
    -------
    xksq : ndarray
        Square of the approximate roots

    See Also
    --------
    initial_nodes
    roots_hermite_asy
    """
    tauk = _compute_tauk(n, k)
    sigk = cos(0.5*tauk)**2
    a = n % 2 - 0.5
    nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
    # Initial approximation of Hermite roots (square)
    xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25)
    return xksq


def _initial_nodes_b(n, k):
    r"""Gatteschi initial guesses

    Computes an initial approximation to the square of the kth
    (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
    of order :math:`n`. The formula is the one from lemma 3.2 in the
    original paper. The guesses are accurate in the region just
    below :math:`\sqrt{2n + 1}`.

    Parameters
    ----------
    n : int
        Quadrature order
    k : ndarray of type int
        Index of roots to compute

    Returns
    -------
    xksq : ndarray
        Square of the approximate root

    See Also
    --------
    initial_nodes
    roots_hermite_asy
    """
    a = n % 2 - 0.5
    nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
    # Airy roots by approximation
    ak = specfun.airyzo(k.max(), 1)[0][::-1]
    # Initial approximation of Hermite roots (square)
    xksq = (nu +
            2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +
            1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +
            (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +
            (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -
            (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))
    return xksq


def _initial_nodes(n):
    """Initial guesses for the Hermite roots

    Computes an initial approximation to the non-negative
    roots :math:`x_k` of the Hermite polynomial :math:`H_n`
    of order :math:`n`. The Tricomi and Gatteschi initial
    guesses are used in the region where they are accurate.

    Parameters
    ----------
    n : int
        Quadrature order

    Returns
    -------
    xk : ndarray
        Approximate roots

    See Also
    --------
    roots_hermite_asy
    """
    # Turnover point
    # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules
    fit = 0.49082003*n - 4.37859653
    turnover = around(fit).astype(int)
    # Compute all approximations
    ia = arange(1, int(floor(n*0.5)+1))
    ib = ia[::-1]
    xasq = _initial_nodes_a(n, ia[:turnover+1])
    xbsq = _initial_nodes_b(n, ib[turnover+1:])
    # Combine
    iv = sqrt(hstack([xasq, xbsq]))
    # Central node is always zero
    if n % 2 == 1:
        iv = hstack([0.0, iv])
    return iv


def _pbcf(n, theta):
    r"""Asymptotic series expansion of parabolic cylinder function

    The implementation is based on sections 3.2 and 3.3 from the
    original paper. Compared to the published version this code
    adds one more term to the asymptotic series. The detailed
    formulas can be found at [parabolic-asymptotics]_. The
    evaluation is done in a transformed variable
    :math:`\theta := \arccos(t)` where :math:`t := x / \mu`
    and :math:`\mu := \sqrt{2n + 1}`.

    Parameters
    ----------
    n : int
        Quadrature order
    theta : ndarray
        Transformed position variable

    Returns
    -------
    U : ndarray
        Value of the parabolic cylinder function :math:`U(a, \theta)`.
    Ud : ndarray
        Value of the derivative :math:`U^{\prime}(a, \theta)` of
        the parabolic cylinder function.

    See Also
    --------
    roots_hermite_asy

    References
    ----------
    ..
[parabolic-asymptotics] https://dlmf.nist.gov/12.10#vii """ st = sin(theta) ct = cos(theta) # https://dlmf.nist.gov/12.10#vii mu = 2.0*n + 1.0 # https://dlmf.nist.gov/12.10#E23 eta = 0.5*theta - 0.5*st*ct # https://dlmf.nist.gov/12.10#E39 zeta = -(3.0*eta/2.0) ** (2.0/3.0) # https://dlmf.nist.gov/12.10#E40 phi = (-zeta / st**2) ** (0.25) # Coefficients # https://dlmf.nist.gov/12.10#E43 a0 = 1.0 a1 = 0.10416666666666666667 a2 = 0.08355034722222222222 a3 = 0.12822657455632716049 a4 = 0.29184902646414046425 a5 = 0.88162726744375765242 b0 = 1.0 b1 = -0.14583333333333333333 b2 = -0.09874131944444444444 b3 = -0.14331205391589506173 b4 = -0.31722720267841354810 b5 = -0.94242914795712024914 # Polynomials # https://dlmf.nist.gov/12.10#E9 # https://dlmf.nist.gov/12.10#E10 ctp = ct ** arange(16).reshape((-1,1)) u0 = 1.0 u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0 u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0 u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0 u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0 u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:] - 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0 v0 = 1.0 v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0 v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0 v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0 v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0 v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:] + 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0 # Airy Evaluation (Bi and Bip unused) Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta) 
# Prefactor for U P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi # Terms for U # https://dlmf.nist.gov/12.10#E42 phip = phi ** arange(6, 31, 6).reshape((-1,1)) A0 = b0*u0 A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3 A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6 B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2 B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5 B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8 # U # https://dlmf.nist.gov/12.10#E35 U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) + Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0)) # Prefactor for derivative of U Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi # Terms for derivative of U # https://dlmf.nist.gov/12.10#E46 C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4 C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7 D0 = a0*v0 D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3 D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6 # Derivative of U # https://dlmf.nist.gov/12.10#E36 Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) + Aip * (D0 + D1/mu**2.0 + D2/mu**4.0)) return U, Ud def _newton(n, x_initial, maxit=5): """Newton iteration for polishing the asymptotic approximation to the zeros of the Hermite polynomials. Parameters ---------- n : int Quadrature order x_initial : ndarray Initial guesses for the roots maxit : int Maximal number of Newton iterations. The default 5 is sufficient, usually only one or two steps are needed. 
Returns ------- nodes : ndarray Quadrature nodes weights : ndarray Quadrature weights See Also -------- roots_hermite_asy """ # Variable transformation mu = sqrt(2.0*n + 1.0) t = x_initial / mu theta = arccos(t) # Newton iteration for i in range(maxit): u, ud = _pbcf(n, theta) dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud) theta = theta + dtheta if max(abs(dtheta)) < 1e-14: break # Undo variable transformation x = mu * cos(theta) # Central node is always zero if n % 2 == 1: x[0] = 0.0 # Compute weights w = exp(-x**2) / (2.0*ud**2) return x, w def _roots_hermite_asy(n): r"""Gauss-Hermite (physicist's) quadrature for large n. Computes the sample points and weights for Gauss-Hermite quadrature. The sample points are the roots of the nth degree Hermite polynomial, :math:`H_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`. This method relies on asymptotic expansions which work best for n > 150. The algorithm has linear runtime making computation for very large n feasible. Parameters ---------- n : int quadrature order Returns ------- nodes : ndarray Quadrature nodes weights : ndarray Quadrature weights See Also -------- roots_hermite References ---------- .. [townsend.trogdon.olver-2014] Townsend, A. and Trogdon, T. and Olver, S. (2014) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. :arXiv:`1410.5286`. .. [townsend.trogdon.olver-2015] Townsend, A. and Trogdon, T. and Olver, S. (2015) *Fast computation of Gauss quadrature nodes and weights on the whole real line*. IMA Journal of Numerical Analysis :doi:`10.1093/imanum/drv002`. 
""" iv = _initial_nodes(n) nodes, weights = _newton(n, iv) # Combine with negative parts if n % 2 == 0: nodes = hstack([-nodes[::-1], nodes]) weights = hstack([weights[::-1], weights]) else: nodes = hstack([-nodes[-1:0:-1], nodes]) weights = hstack([weights[-1:0:-1], weights]) # Scale weights weights *= sqrt(pi) / sum(weights) return nodes, weights def hermite(n, monic=False): r"""Physicist's Hermite polynomial. Defined by .. math:: H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2}; :math:`H_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- H : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math:`H_n` are orthogonal over :math:`(-\infty, \infty)` with weight function :math:`e^{-x^2}`. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_hermite(n1, mu=True) wfunc = lambda x: exp(-x * x) if n == 0: x, w = [], [] hn = 2**n * _gam(n + 1) * sqrt(pi) kn = 2**n p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic, lambda x: eval_hermite(n, x)) return p # Hermite 2 He_n(x) def roots_hermitenorm(n, mu=False): r"""Gauss-Hermite (statistician's) quadrature. Compute the sample points and weights for Gauss-Hermite quadrature. The sample points are the roots of the nth degree Hermite polynomial, :math:`He_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-\infty, \infty]` with weight function :math:`w(x) = e^{-x^2/2}`. See 22.2.15 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights Notes ----- For small n up to 150 a modified version of the Golub-Welsch algorithm is used. 
Nodes are computed from the eigenvalue problem and improved by one step of a Newton iteration. The weights are computed from the well-known analytical formula. For n larger than 150 an optimal asymptotic algorithm is used which computes nodes and weights in a numerical stable manner. The algorithm has linear runtime making computation for very large n (several thousand or more) feasible. See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.hermite_e.hermegauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") mu0 = np.sqrt(2.0*np.pi) if n <= 150: an_func = lambda k: 0.0*k bn_func = lambda k: np.sqrt(k) f = cephes.eval_hermitenorm df = lambda n, x: n * cephes.eval_hermitenorm(n-1, x) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) else: nodes, weights = _roots_hermite_asy(m) # Transform nodes *= sqrt(2) weights *= sqrt(2) if mu: return nodes, weights, mu0 else: return nodes, weights def hermitenorm(n, monic=False): r"""Normalized (probabilist's) Hermite polynomial. Defined by .. math:: He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2}; :math:`He_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- He : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math:`He_n` are orthogonal over :math:`(-\infty, \infty)` with weight function :math:`e^{-x^2/2}`. 
""" if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_hermitenorm(n1, mu=True) wfunc = lambda x: exp(-x * x / 2.0) if n == 0: x, w = [], [] hn = sqrt(2 * pi) * _gam(n + 1) kn = 1.0 p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic, eval_func=lambda x: eval_hermitenorm(n, x)) return p # The remainder of the polynomials can be derived from the ones above. # Ultraspherical (Gegenbauer) C^(alpha)_n(x) def roots_gegenbauer(n, alpha, mu=False): r"""Gauss-Gegenbauer quadrature. Compute the sample points and weights for Gauss-Gegenbauer quadrature. The sample points are the roots of the nth degree Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = (1 - x^2)^{\alpha - 1/2}`. See 22.2.3 in [AS]_ for more details. Parameters ---------- n : int quadrature order alpha : float alpha must be > -0.5 mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") if alpha < -0.5: raise ValueError("alpha must be greater than -0.5.") elif alpha == 0.0: # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x) # strictly, we should just error out here, since the roots are not # really defined, but we used to return something useful, so let's # keep doing so. 
return roots_chebyt(n, mu) mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1) an_func = lambda k: 0.0 * k bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1) / (4 * (k + alpha) * (k + alpha - 1))) f = lambda n, x: cephes.eval_gegenbauer(n, alpha, x) df = lambda n, x: (-n*x*cephes.eval_gegenbauer(n, alpha, x) + (n + 2*alpha - 1)*cephes.eval_gegenbauer(n-1, alpha, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) def gegenbauer(n, alpha, monic=False): r"""Gegenbauer (ultraspherical) polynomial. Defined to be the solution of .. math:: (1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)} - (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)} + n(n + 2\alpha)C_n^{(\alpha)} = 0 for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- C : orthopoly1d Gegenbauer polynomial. Notes ----- The polynomials :math:`C_n^{(\alpha)}` are orthogonal over :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha - 1/2)}`. """ base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic) if monic: return base # Abrahmowitz and Stegan 22.5.20 factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) / _gam(2*alpha) / _gam(alpha + 0.5 + n)) base._scale(factor) base.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x) return base # Chebyshev of the first kind: T_n(x) = # n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x) # Computed anew. def roots_chebyt(n, mu=False): r"""Gauss-Chebyshev (first kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the first kind, :math:`T_n(x)`. 
These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = 1/\sqrt{1 - x^2}`. See 22.2.4 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.chebyshev.chebgauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ m = int(n) if n < 1 or n != m: raise ValueError('n must be a positive integer.') x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m)) w = np.full_like(x, pi/m) if mu: return x, w, pi else: return x, w def chebyt(n, monic=False): r"""Chebyshev polynomial of the first kind. Defined to be the solution of .. math:: (1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0; :math:`T_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- T : orthopoly1d Chebyshev polynomial of the first kind. Notes ----- The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]` with weight function :math:`(1 - x^2)^{-1/2}`. See Also -------- chebyu : Chebyshev polynomial of the second kind. """ if n < 0: raise ValueError("n must be nonnegative.") wfunc = lambda x: 1.0 / sqrt(1 - x * x) if n == 0: return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic, lambda x: eval_chebyt(n, x)) n1 = n x, w, mu = roots_chebyt(n1, mu=True) hn = pi / 2 kn = 2**(n - 1) p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, lambda x: eval_chebyt(n, x)) return p # Chebyshev of the second kind # U_n(x) = (n+1)! 
sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x) def roots_chebyu(n, mu=False): r"""Gauss-Chebyshev (second kind) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the second kind, :math:`U_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = \sqrt{1 - x^2}`. See 22.2.5 in [AS]_ for details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ m = int(n) if n < 1 or n != m: raise ValueError('n must be a positive integer.') t = np.arange(m, 0, -1) * pi / (m + 1) x = np.cos(t) w = pi * np.sin(t)**2 / (m + 1) if mu: return x, w, pi / 2 else: return x, w def chebyu(n, monic=False): r"""Chebyshev polynomial of the second kind. Defined to be the solution of .. math:: (1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n + n(n + 2)U_n = 0; :math:`U_n` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- U : orthopoly1d Chebyshev polynomial of the second kind. Notes ----- The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]` with weight function :math:`(1 - x^2)^{1/2}`. See Also -------- chebyt : Chebyshev polynomial of the first kind. 
""" base = jacobi(n, 0.5, 0.5, monic=monic) if monic: return base factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5) base._scale(factor) return base # Chebyshev of the first kind C_n(x) def roots_chebyc(n, mu=False): r"""Gauss-Chebyshev (first kind) quadrature. Compute the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the first kind, :math:`C_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-2, 2]` with weight function :math:`w(x) = 1 / \sqrt{1 - (x/2)^2}`. See 22.2.6 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ x, w, m = roots_chebyt(n, True) x *= 2 w *= 2 m *= 2 if mu: return x, w, m else: return x, w def chebyc(n, monic=False): r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`. Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the nth Chebychev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- C : orthopoly1d Chebyshev polynomial of the first kind on :math:`[-2, 2]`. Notes ----- The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]` with weight function :math:`1/\sqrt{1 - (x/2)^2}`. See Also -------- chebyt : Chebyshev polynomial of the first kind. References ---------- .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" Section 22. 
National Bureau of Standards, 1972. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_chebyc(n1, mu=True) if n == 0: x, w = [], [] hn = 4 * pi * ((n == 0) + 1) kn = 1.0 p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0), limits=(-2, 2), monic=monic) if not monic: p._scale(2.0 / p(2)) p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x) return p # Chebyshev of the second kind S_n(x) def roots_chebys(n, mu=False): r"""Gauss-Chebyshev (second kind) quadrature. Compute the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree Chebyshev polynomial of the second kind, :math:`S_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-2, 2]` with weight function :math:`w(x) = \sqrt{1 - (x/2)^2}`. See 22.2.7 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ x, w, m = roots_chebyu(n, True) x *= 2 w *= 2 m *= 2 if mu: return x, w, m else: return x, w def chebys(n, monic=False): r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`. Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the nth Chebychev polynomial of the second kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- S : orthopoly1d Chebyshev polynomial of the second kind on :math:`[-2, 2]`. 
Notes ----- The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]` with weight function :math:`\sqrt{1 - (x/2)}^2`. See Also -------- chebyu : Chebyshev polynomial of the second kind References ---------- .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" Section 22. National Bureau of Standards, 1972. """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_chebys(n1, mu=True) if n == 0: x, w = [], [] hn = pi kn = 1.0 p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: sqrt(1 - x * x / 4.0), limits=(-2, 2), monic=monic) if not monic: factor = (n + 1.0) / p(2) p._scale(factor) p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x) return p # Shifted Chebyshev of the first kind T^*_n(x) def roots_sh_chebyt(n, mu=False): r"""Gauss-Chebyshev (first kind, shifted) quadrature. Compute the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree shifted Chebyshev polynomial of the first kind, :math:`T_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`w(x) = 1/\sqrt{x - x^2}`. See 22.2.8 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ xw = roots_chebyt(n, mu) return ((xw[0] + 1) / 2,) + xw[1:] def sh_chebyt(n, monic=False): r"""Shifted Chebyshev polynomial of the first kind. Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth Chebyshev polynomial of the first kind. 
Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. Returns ------- T : orthopoly1d Shifted Chebyshev polynomial of the first kind. Notes ----- The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]` with weight function :math:`(x - x^2)^{-1/2}`. """ base = sh_jacobi(n, 0.0, 0.5, monic=monic) if monic: return base if n > 0: factor = 4**n / 2.0 else: factor = 1.0 base._scale(factor) return base # Shifted Chebyshev of the second kind U^*_n(x) def roots_sh_chebyu(n, mu=False): r"""Gauss-Chebyshev (second kind, shifted) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree shifted Chebyshev polynomial of the second kind, :math:`U_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`w(x) = \sqrt{x - x^2}`. See 22.2.9 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ x, w, m = roots_chebyu(n, True) x = (x + 1) / 2 m_us = cephes.beta(1.5, 1.5) w *= m_us / m if mu: return x, w, m_us else: return x, w def sh_chebyu(n, monic=False): r"""Shifted Chebyshev polynomial of the second kind. Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth Chebyshev polynomial of the second kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. 
Returns ------- U : orthopoly1d Shifted Chebyshev polynomial of the second kind. Notes ----- The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]` with weight function :math:`(x - x^2)^{1/2}`. """ base = sh_jacobi(n, 2.0, 1.5, monic=monic) if monic: return base factor = 4**n base._scale(factor) return base # Legendre def roots_legendre(n, mu=False): r"""Gauss-Legendre quadrature. Compute the sample points and weights for Gauss-Legendre quadrature. The sample points are the roots of the nth degree Legendre polynomial :math:`P_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = 1.0`. See 2.2.10 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ m = int(n) if n < 1 or n != m: raise ValueError("n must be a positive integer.") mu0 = 2.0 an_func = lambda k: 0.0 * k bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) f = cephes.eval_legendre df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + n*cephes.eval_legendre(n-1, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) def legendre(n, monic=False): r"""Legendre polynomial. Defined to be the solution of .. math:: \frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right] + n(n + 1)P_n(x) = 0; :math:`P_n(x)` is a polynomial of degree :math:`n`. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. Default is `False`. 
Returns ------- P : orthopoly1d Legendre polynomial. Notes ----- The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]` with weight function 1. Examples -------- Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0): >>> from scipy.special import legendre >>> legendre(3) poly1d([ 2.5, 0. , -1.5, 0. ]) """ if n < 0: raise ValueError("n must be nonnegative.") if n == 0: n1 = n + 1 else: n1 = n x, w, mu0 = roots_legendre(n1, mu=True) if n == 0: x, w = [], [] hn = 2.0 / (2 * n + 1) kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1), monic=monic, eval_func=lambda x: eval_legendre(n, x)) return p # Shifted Legendre P^*_n(x) def roots_sh_legendre(n, mu=False): r"""Gauss-Legendre (shifted) quadrature. Compute the sample points and weights for Gauss-Legendre quadrature. The sample points are the roots of the nth degree shifted Legendre polynomial :math:`P^*_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[0, 1]` with weight function :math:`w(x) = 1.0`. See 2.2.11 in [AS]_ for details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ x, w = roots_legendre(n) x = (x + 1) / 2 w /= 2 if mu: return x, w, 1.0 else: return x, w def sh_legendre(n, monic=False): r"""Shifted Legendre polynomial. Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth Legendre polynomial. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If `True`, scale the leading coefficient to be 1. 
Default is `False`. Returns ------- P : orthopoly1d Shifted Legendre polynomial. Notes ----- The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]` with weight function 1. """ if n < 0: raise ValueError("n must be nonnegative.") wfunc = lambda x: 0.0 * x + 1.0 if n == 0: return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic, lambda x: eval_sh_legendre(n, x)) x, w, mu0 = roots_sh_legendre(n, mu=True) hn = 1.0 / (2 * n + 1.0) kn = _gam(2 * n + 1) / _gam(n + 1)**2 p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic, eval_func=lambda x: eval_sh_legendre(n, x)) return p # ----------------------------------------------------------------------------- # Code for backwards compatibility # ----------------------------------------------------------------------------- # Import functions in case someone is still calling the orthogonal # module directly. (They shouldn't be; it's not in the public API). poch = cephes.poch # eval_chebyu, eval_sh_chebyt and eval_sh_chebyu: These functions are not # used in orthogonal.py, they are not in _rootfuns_map, but their names # do appear in _evalfuns, so they must be kept. from ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer, eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc, eval_sh_chebyt, eval_sh_chebyu, eval_legendre, eval_sh_legendre, eval_genlaguerre, eval_laguerre, eval_hermite, eval_hermitenorm) # Make the old root function names an alias for the new ones _modattrs = globals() for newfun, oldfun in _rootfuns_map.items(): _modattrs[oldfun] = _modattrs[newfun] __all__.append(oldfun)
pizzathief/scipy
scipy/special/orthogonal.py
Python
bsd-3-clause
62,286
[ "Gaussian" ]
f45c119e59ab0afaed57c765417107bb97a693f77259c0090a4651a8e4fc593d
import numpy as np import _RandomNumberGenerator as RNG def add(projections, Gaussian=None, Poisson=None): if Poisson is not None: if not np.isscalar(Poisson): raise ValueError( "Poisson value should be an scalar, is " + str(type(Poisson)) + " instead." ) else: Poisson = np.ceil(np.log2(np.max(np.abs(projections)))) # nextpow2 if Gaussian is not None: if not isinstance(Gaussian, np.ndarray): raise ValueError( "Gaussian value should be an array, is " + str(type(Gaussian)) + " instead." ) if Gaussian.shape != (2,): raise ValueError("Gaussian shape should be 1x2, is " + str(Gaussian.shape) + "instead.") max_proj = np.max(projections) projections = Poisson * np.exp(-projections / max_proj) projections = RNG.add_noise(projections, Gaussian[0], Gaussian[1]) projections = -np.log(projections / Poisson) * max_proj projections = np.float32(projections) return projections
CERN/TIGRE
Python/tigre/utilities/CTnoise.py
Python
bsd-3-clause
1,041
[ "Gaussian" ]
beceb94e1643db13c7d1fcb96296eaf46d40ab489226c360d8312650730583fe
# -*- coding: utf-8 -*- # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json import os import pytest import tarfile import uuid from hashlib import sha256 from io import BytesIO from units.compat.mock import MagicMock from ansible import context from ansible.cli.galaxy import GalaxyCLI from ansible.errors import AnsibleError from ansible.galaxy import api, collection, token from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.utils import context_objects as co from ansible.utils.display import Display from ansible.utils.hashing import secure_hash_s @pytest.fixture(autouse='function') def reset_cli_args(): co.GlobalCLIArgs._Singleton__instance = None yield co.GlobalCLIArgs._Singleton__instance = None @pytest.fixture() def collection_input(tmp_path_factory): ''' Creates a collection skeleton directory for build tests ''' test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input')) namespace = 'ansible_namespace' collection = 'collection' skeleton = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton') galaxy_args = ['ansible-galaxy', 'collection', 'init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir, '--collection-skeleton', skeleton] GalaxyCLI(args=galaxy_args).run() collection_dir = os.path.join(test_dir, namespace, collection) output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Output')) return collection_dir, output_dir @pytest.fixture() def collection_artifact(monkeypatch, tmp_path_factory): ''' Creates a temp collection artifact and mocked open_url instance for publishing tests ''' mock_open = MagicMock() monkeypatch.setattr(collection, 'open_url', mock_open) mock_uuid = MagicMock() mock_uuid.return_value.hex = 'uuid' 
monkeypatch.setattr(uuid, 'uuid4', mock_uuid) tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections') input_file = to_text(tmp_path / 'collection.tar.gz') with tarfile.open(input_file, 'w:gz') as tfile: b_io = BytesIO(b"\x00\x01\x02\x03") tar_info = tarfile.TarInfo('test') tar_info.size = 4 tar_info.mode = 0o0644 tfile.addfile(tarinfo=tar_info, fileobj=b_io) return input_file, mock_open @pytest.fixture() def galaxy_yml(request, tmp_path_factory): b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) b_galaxy_yml = os.path.join(b_test_dir, b'galaxy.yml') with open(b_galaxy_yml, 'wb') as galaxy_obj: galaxy_obj.write(to_bytes(request.param)) yield b_galaxy_yml @pytest.fixture() def tmp_tarfile(tmp_path_factory): ''' Creates a temporary tar file for _extract_tar_file tests ''' filename = u'ÅÑŚÌβŁÈ' temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename))) tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename)) data = os.urandom(8) with tarfile.open(tar_file, 'w:gz') as tfile: b_io = BytesIO(data) tar_info = tarfile.TarInfo(filename) tar_info.size = len(data) tar_info.mode = 0o0644 tfile.addfile(tarinfo=tar_info, fileobj=b_io) sha256_hash = sha256() sha256_hash.update(data) with tarfile.open(tar_file, 'r') as tfile: yield temp_dir, tfile, filename, sha256_hash.hexdigest() @pytest.fixture() def galaxy_server(): context.CLIARGS._store = {'ignore_certs': False} galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com', token=token.GalaxyToken(token='key')) return galaxy_api def test_build_collection_no_galaxy_yaml(): fake_path = u'/fake/ÅÑŚÌβŁÈ/path' expected = to_native("The collection galaxy.yml path '%s/galaxy.yml' does not exist." 
% fake_path) with pytest.raises(AnsibleError, match=expected): collection.build_collection(fake_path, 'output', False) def test_build_existing_output_file(collection_input): input_dir, output_dir = collection_input existing_output_dir = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') os.makedirs(existing_output_dir) expected = "The output collection artifact '%s' already exists, but is a directory - aborting" \ % to_native(existing_output_dir) with pytest.raises(AnsibleError, match=expected): collection.build_collection(input_dir, output_dir, False) def test_build_existing_output_without_force(collection_input): input_dir, output_dir = collection_input existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') with open(existing_output, 'w+') as out_file: out_file.write("random garbage") out_file.flush() expected = "The file '%s' already exists. You can use --force to re-create the collection artifact." \ % to_native(existing_output) with pytest.raises(AnsibleError, match=expected): collection.build_collection(input_dir, output_dir, False) def test_build_existing_output_with_force(collection_input): input_dir, output_dir = collection_input existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') with open(existing_output, 'w+') as out_file: out_file.write("random garbage") out_file.flush() collection.build_collection(input_dir, output_dir, True) # Verify the file was replaced with an actual tar file assert tarfile.is_tarfile(existing_output) @pytest.mark.parametrize('galaxy_yml', [b'namespace: value: broken'], indirect=True) def test_invalid_yaml_galaxy_file(galaxy_yml): expected = to_native(b"Failed to parse the galaxy.yml at '%s' with the following error:" % galaxy_yml) with pytest.raises(AnsibleError, match=expected): collection._get_galaxy_yml(galaxy_yml) @pytest.mark.parametrize('galaxy_yml', [b'namespace: test_namespace'], indirect=True) def 
test_missing_required_galaxy_key(galaxy_yml): expected = "The collection galaxy.yml at '%s' is missing the following mandatory keys: authors, name, " \ "readme, version" % to_native(galaxy_yml) with pytest.raises(AnsibleError, match=expected): collection._get_galaxy_yml(galaxy_yml) @pytest.mark.parametrize('galaxy_yml', [b""" namespace: namespace name: collection authors: Jordan version: 0.1.0 readme: README.md invalid: value"""], indirect=True) def test_warning_extra_keys(galaxy_yml, monkeypatch): display_mock = MagicMock() monkeypatch.setattr(Display, 'warning', display_mock) collection._get_galaxy_yml(galaxy_yml) assert display_mock.call_count == 1 assert display_mock.call_args[0][0] == "Found unknown keys in collection galaxy.yml at '%s': invalid"\ % to_text(galaxy_yml) @pytest.mark.parametrize('galaxy_yml', [b""" namespace: namespace name: collection authors: Jordan version: 0.1.0 readme: README.md"""], indirect=True) def test_defaults_galaxy_yml(galaxy_yml): actual = collection._get_galaxy_yml(galaxy_yml) assert actual['namespace'] == 'namespace' assert actual['name'] == 'collection' assert actual['authors'] == ['Jordan'] assert actual['version'] == '0.1.0' assert actual['readme'] == 'README.md' assert actual['description'] is None assert actual['repository'] is None assert actual['documentation'] is None assert actual['homepage'] is None assert actual['issues'] is None assert actual['tags'] == [] assert actual['dependencies'] == {} assert actual['license_ids'] == [] @pytest.mark.parametrize('galaxy_yml', [(b""" namespace: namespace name: collection authors: Jordan version: 0.1.0 readme: README.md license: MIT"""), (b""" namespace: namespace name: collection authors: Jordan version: 0.1.0 readme: README.md license: - MIT""")], indirect=True) def test_galaxy_yml_list_value(galaxy_yml): actual = collection._get_galaxy_yml(galaxy_yml) assert actual['license_ids'] == ['MIT'] def test_build_ignore_files_and_folders(collection_input, monkeypatch): input_dir = 
collection_input[0] mock_display = MagicMock() monkeypatch.setattr(Display, 'vvv', mock_display) git_folder = os.path.join(input_dir, '.git') retry_file = os.path.join(input_dir, 'ansible.retry') os.makedirs(git_folder) with open(retry_file, 'w+') as ignore_file: ignore_file.write('random') ignore_file.flush() actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection') assert actual['format'] == 1 for manifest_entry in actual['files']: assert manifest_entry['name'] not in ['.git', 'ansible.retry', 'galaxy.yml'] expected_msgs = [ "Skipping '%s' for collection build" % to_text(retry_file), "Skipping '%s' for collection build" % to_text(git_folder), ] assert mock_display.call_count == 2 assert mock_display.mock_calls[0][1][0] in expected_msgs assert mock_display.mock_calls[1][1][0] in expected_msgs def test_build_ignore_older_release_in_root(collection_input, monkeypatch): input_dir = collection_input[0] mock_display = MagicMock() monkeypatch.setattr(Display, 'vvv', mock_display) # This is expected to be ignored because it is in the root collection dir. release_file = os.path.join(input_dir, 'namespace-collection-0.0.0.tar.gz') # This is not expected to be ignored because it is not in the root collection dir. 
fake_release_file = os.path.join(input_dir, 'plugins', 'namespace-collection-0.0.0.tar.gz') for filename in [release_file, fake_release_file]: with open(filename, 'w+') as file_obj: file_obj.write('random') file_obj.flush() actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection') assert actual['format'] == 1 plugin_release_found = False for manifest_entry in actual['files']: assert manifest_entry['name'] != 'namespace-collection-0.0.0.tar.gz' if manifest_entry['name'] == 'plugins/namespace-collection-0.0.0.tar.gz': plugin_release_found = True assert plugin_release_found assert mock_display.call_count == 1 assert mock_display.mock_calls[0][1][0] == "Skipping '%s' for collection build" % to_text(release_file) def test_build_ignore_symlink_target_outside_collection(collection_input, monkeypatch): input_dir, outside_dir = collection_input mock_display = MagicMock() monkeypatch.setattr(Display, 'warning', mock_display) link_path = os.path.join(input_dir, 'plugins', 'connection') os.symlink(outside_dir, link_path) actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection') for manifest_entry in actual['files']: assert manifest_entry['name'] != 'plugins/connection' assert mock_display.call_count == 1 assert mock_display.mock_calls[0][1][0] == "Skipping '%s' as it is a symbolic link to a directory outside " \ "the collection" % to_text(link_path) def test_build_copy_symlink_target_inside_collection(collection_input): input_dir = collection_input[0] os.makedirs(os.path.join(input_dir, 'playbooks', 'roles')) roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked') roles_target = os.path.join(input_dir, 'roles', 'linked') roles_target_tasks = os.path.join(roles_target, 'tasks') os.makedirs(roles_target_tasks) with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main: tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:") tasks_main.flush() os.symlink(roles_target, roles_link) 
actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection') linked_entries = [e for e in actual['files'] if e['name'].startswith('playbooks/roles/linked')] assert len(linked_entries) == 3 assert linked_entries[0]['name'] == 'playbooks/roles/linked' assert linked_entries[0]['ftype'] == 'dir' assert linked_entries[1]['name'] == 'playbooks/roles/linked/tasks' assert linked_entries[1]['ftype'] == 'dir' assert linked_entries[2]['name'] == 'playbooks/roles/linked/tasks/main.yml' assert linked_entries[2]['ftype'] == 'file' assert linked_entries[2]['chksum_sha256'] == '9c97a1633c51796999284c62236b8d5462903664640079b80c37bf50080fcbc3' def test_build_with_symlink_inside_collection(collection_input): input_dir, output_dir = collection_input os.makedirs(os.path.join(input_dir, 'playbooks', 'roles')) roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked') file_link = os.path.join(input_dir, 'docs', 'README.md') roles_target = os.path.join(input_dir, 'roles', 'linked') roles_target_tasks = os.path.join(roles_target, 'tasks') os.makedirs(roles_target_tasks) with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main: tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:") tasks_main.flush() os.symlink(roles_target, roles_link) os.symlink(os.path.join(input_dir, 'README.md'), file_link) collection.build_collection(input_dir, output_dir, False) output_artifact = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz') assert tarfile.is_tarfile(output_artifact) with tarfile.open(output_artifact, mode='r') as actual: members = actual.getmembers() linked_members = [m for m in members if m.path.startswith('playbooks/roles/linked/tasks')] assert len(linked_members) == 2 assert linked_members[0].name == 'playbooks/roles/linked/tasks' assert linked_members[0].isdir() assert linked_members[1].name == 'playbooks/roles/linked/tasks/main.yml' assert linked_members[1].isreg() linked_task = 
actual.extractfile(linked_members[1].name) actual_task = secure_hash_s(linked_task.read()) linked_task.close() assert actual_task == 'f4dcc52576b6c2cd8ac2832c52493881c4e54226' linked_file = [m for m in members if m.path == 'docs/README.md'] assert len(linked_file) == 1 assert linked_file[0].isreg() linked_file_obj = actual.extractfile(linked_file[0].name) actual_file = secure_hash_s(linked_file_obj.read()) linked_file_obj.close() assert actual_file == '63444bfc766154e1bc7557ef6280de20d03fcd81' def test_publish_no_wait(galaxy_server, collection_artifact, monkeypatch): mock_display = MagicMock() monkeypatch.setattr(Display, 'display', mock_display) artifact_path, mock_open = collection_artifact fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234' mock_publish = MagicMock() mock_publish.return_value = fake_import_uri monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish) collection.publish_collection(artifact_path, galaxy_server, False, 0) assert mock_publish.call_count == 1 assert mock_publish.mock_calls[0][1][0] == artifact_path assert mock_display.call_count == 1 assert mock_display.mock_calls[0][1][0] == \ "Collection has been pushed to the Galaxy server %s %s, not waiting until import has completed due to " \ "--no-wait being set. 
Import task results can be found at %s" % (galaxy_server.name, galaxy_server.api_server, fake_import_uri) def test_publish_with_wait(galaxy_server, collection_artifact, monkeypatch): mock_display = MagicMock() monkeypatch.setattr(Display, 'display', mock_display) artifact_path, mock_open = collection_artifact fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234' mock_publish = MagicMock() mock_publish.return_value = fake_import_uri monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish) mock_wait = MagicMock() monkeypatch.setattr(galaxy_server, 'wait_import_task', mock_wait) collection.publish_collection(artifact_path, galaxy_server, True, 0) assert mock_publish.call_count == 1 assert mock_publish.mock_calls[0][1][0] == artifact_path assert mock_wait.call_count == 1 assert mock_wait.mock_calls[0][1][0] == fake_import_uri assert mock_display.mock_calls[0][1][0] == "Collection has been published to the Galaxy server test_server %s" \ % galaxy_server.api_server def test_find_existing_collections(tmp_path_factory, monkeypatch): test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) collection1 = os.path.join(test_dir, 'namespace1', 'collection1') collection2 = os.path.join(test_dir, 'namespace2', 'collection2') fake_collection1 = os.path.join(test_dir, 'namespace3', 'collection3') fake_collection2 = os.path.join(test_dir, 'namespace4') os.makedirs(collection1) os.makedirs(collection2) os.makedirs(os.path.split(fake_collection1)[0]) open(fake_collection1, 'wb+').close() open(fake_collection2, 'wb+').close() collection1_manifest = json.dumps({ 'collection_info': { 'namespace': 'namespace1', 'name': 'collection1', 'version': '1.2.3', 'authors': ['Jordan Borean'], 'readme': 'README.md', 'dependencies': {}, }, 'format': 1, }) with open(os.path.join(collection1, 'MANIFEST.json'), 'wb') as manifest_obj: manifest_obj.write(to_bytes(collection1_manifest)) mock_warning = MagicMock() monkeypatch.setattr(Display, 'warning', mock_warning) 
actual = collection._find_existing_collections(test_dir) assert len(actual) == 2 for actual_collection in actual: assert actual_collection.skip is True if str(actual_collection) == 'namespace1.collection1': assert actual_collection.namespace == 'namespace1' assert actual_collection.name == 'collection1' assert actual_collection.b_path == to_bytes(collection1) assert actual_collection.api is None assert actual_collection.versions == set(['1.2.3']) assert actual_collection.latest_version == '1.2.3' assert actual_collection.dependencies == {} else: assert actual_collection.namespace == 'namespace2' assert actual_collection.name == 'collection2' assert actual_collection.b_path == to_bytes(collection2) assert actual_collection.api is None assert actual_collection.versions == set(['*']) assert actual_collection.latest_version == '*' assert actual_collection.dependencies == {} assert mock_warning.call_count == 1 assert mock_warning.mock_calls[0][1][0] == "Collection at '%s' does not have a MANIFEST.json file, cannot " \ "detect version." 
% to_text(collection2) def test_download_file(tmp_path_factory, monkeypatch): temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) data = b"\x00\x01\x02\x03" sha256_hash = sha256() sha256_hash.update(data) mock_open = MagicMock() mock_open.return_value = BytesIO(data) monkeypatch.setattr(collection, 'open_url', mock_open) expected = os.path.join(temp_dir, b'file') actual = collection._download_file('http://google.com/file', temp_dir, sha256_hash.hexdigest(), True) assert actual.startswith(expected) assert os.path.isfile(actual) with open(actual, 'rb') as file_obj: assert file_obj.read() == data assert mock_open.call_count == 1 assert mock_open.mock_calls[0][1][0] == 'http://google.com/file' def test_download_file_hash_mismatch(tmp_path_factory, monkeypatch): temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')) data = b"\x00\x01\x02\x03" mock_open = MagicMock() mock_open.return_value = BytesIO(data) monkeypatch.setattr(collection, 'open_url', mock_open) expected = "Mismatch artifact hash with downloaded file" with pytest.raises(AnsibleError, match=expected): collection._download_file('http://google.com/file', temp_dir, 'bad', True) def test_extract_tar_file_invalid_hash(tmp_tarfile): temp_dir, tfile, filename, dummy = tmp_tarfile expected = "Checksum mismatch for '%s' inside collection at '%s'" % (to_native(filename), to_native(tfile.name)) with pytest.raises(AnsibleError, match=expected): collection._extract_tar_file(tfile, filename, temp_dir, temp_dir, "fakehash") def test_extract_tar_file_missing_member(tmp_tarfile): temp_dir, tfile, dummy, dummy = tmp_tarfile expected = "Collection tar at '%s' does not contain the expected file 'missing'." 
% to_native(tfile.name) with pytest.raises(AnsibleError, match=expected): collection._extract_tar_file(tfile, 'missing', temp_dir, temp_dir) def test_extract_tar_file_missing_parent_dir(tmp_tarfile): temp_dir, tfile, filename, checksum = tmp_tarfile output_dir = os.path.join(temp_dir, b'output') output_file = os.path.join(output_dir, to_bytes(filename)) collection._extract_tar_file(tfile, filename, output_dir, temp_dir, checksum) os.path.isfile(output_file)
thaim/ansible
test/units/galaxy/test_collection.py
Python
mit
21,643
[ "Galaxy" ]
4bc15f17a234a6b40068cad70c30523c0aef81cffd126fffb2bbdf9f640eb5cd
# Copyright 2022 The T5 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for t5.evaluation.metrics.""" from absl.testing import absltest import sklearn.metrics from t5.evaluation import metrics from t5.evaluation import test_utils class MetricsTest(test_utils.BaseMetricsTest): def test_same_bleu(self): ref = "this is a string" self.assertDictClose( metrics.bleu([ref, ref], [ref, ref]), {"bleu": 100}) def test_different_bleu(self): ref = "this is a string" self.assertDictClose( metrics.bleu([ref, ref], ["", ""]), {"bleu": 0}) def test_multiple_references_bleu(self): ref = "this is a string" self.assertDictClose( metrics.bleu([["", ref], [ref, ""]], [ref, ref]), {"bleu": 100}) def test_same_rouge(self): ref = "this is a string" self.assertDictClose( metrics.rouge([ref, ref], [ref, ref]), {"rouge1": 100, "rouge2": 100, "rougeLsum": 100}) def test_different_rouge(self): ref = "this is a string" self.assertDictClose( metrics.rouge([ref, ref], ["", ""]), {"rouge1": 0, "rouge2": 0, "rougeLsum": 0}) def test_same_squad(self): ref = "this is a string" self.assertDictClose( metrics.squad([["", ref], [ref, ref]], [ref, ref]), { "em": 100, "f1": 100, }) def test_different_squad(self): ref = "this is a string" self.assertDictClose( metrics.squad([[ref, ref], [ref, ref]], ["", ""]), { "em": 0, "f1": 0 }) def test_squad_big(self): self.assertDictClose( metrics.squad( [ ["big moose", "hippo"], ["correct1"], ["correct2.1", "correct2.2"], ["a", "b"], ], [ "‘a big 
Moose!‘", "wrong", "correct2.2", "c", ], ), {"em": 25., "f1": 35.}, places=2 ) def test_squad_small(self): self.assertDictClose( metrics.squad([["abc abd", "$$$$"]], ["abd"]), {"f1": 100 * 2.0 / 3.0, "em": 0.}, ) def test_span_squad(self): ref = "a string" ans_span = "start:2 end:3" context = "this is a string! it has the answer." self.assertDictClose( metrics.span_squad( [{"answers": ["", ref], "context": context}, {"answers": [ref, ref], "context": context}], [ans_span, ans_span]), {"em": 100, "f1": 100}) def test_trivia_qa(self): self.assertDictClose( metrics.trivia_qa( [ ["big moose", "hippo"], ["correct1"], ["correct2.1", "correct2.2"], ["a", "b"], ], [ "‘a big Moose!‘", "wrong", "correct2.2", "c", ], ), {"em": 50., "f1": 50.}, ) def test_span_squad_one_word(self): ref = "answer" ans_span = "start:1 end:1" context = "the answer" self.assertDictClose( metrics.span_squad([{ "answers": [ref], "context": context }], [ans_span]), {"em": 100, "f1": 100}) def test_span_squad_non_numbers(self): ref = "answer" ans_span = "start:test end:why" context = "the answer" self.assertDictClose( metrics.span_squad([{ "answers": [ref], "context": context }], [ans_span]), {"em": 0, "f1": 0}) def test_sequence_accuracy(self): s1 = "this is a string." s2 = "this is a completely different string." 
self.assertDictEqual( metrics.sequence_accuracy([s1, s2], [s1, s1]), {"sequence_accuracy": 50}) def test_multiclass_f1(self): self.assertDictClose( metrics.mean_multiclass_f1(num_classes=3)([0, 1, 1, 2], [0, 0, 2, 2]), {"mean_3class_f1": 44.44444444444444}) def test_all_match(self): self.assertDictEqual( metrics.all_match([0, 1], [0, 1]), {"exact_match": 100.0}) self.assertDictEqual( metrics.all_match([0, 1], [0, 2]), {"exact_match": 0.0}) def test_pearson_corrcoef(self): self.assertDictClose( metrics.pearson_corrcoef([0, 2], [0, 1]), {"pearson_corrcoef": 100.0}) def test_spearman_corrcoef(self): self.assertDictClose( metrics.spearman_corrcoef([0, 2, 1], [0, 1, 2]), {"spearman_corrcoef": 50.}) def test_f1_score_with_invalid(self): self.assertDictClose( metrics.f1_score_with_invalid([0, 1, 1, 0], [0, 1, 2, 2]), {"f1": 50.}) def test_accuracy(self): self.assertDictClose( metrics.accuracy([0, 0, 2, 1], [0, 1, 2, 1]), {"accuracy": 75.}) def test_mean_group_metric(self): metric_fn = metrics.mean_group_metric(metrics.accuracy) self.assertDictClose( metric_fn( [{"group": "a", "value": 0}, {"group": "a", "value": 1}, {"group": "b", "value": 0}], [{"value": 0}, {"value": 0}, {"value": 1}]), {"accuracy": 25.}) def test_mean_group_metric_with_subgroups(self): metric_fn = metrics.mean_group_metric( metrics.accuracy, return_subgroup_scores=True) self.assertDictClose( metric_fn( [{"group": "a", "value": 0}, {"group": "a", "value": 1}, {"group": "b", "value": 0}], [{"value": 0}, {"value": 0}, {"value": 1}]), {"accuracy": 25.0, "a-accuracy": 50.0, "b-accuracy": 0.0}) def test_multirc_f1_over_all_answers(self): metric_fn = metrics.multirc_f1_over_all_answers self.assertDictClose( metric_fn( [{"group": "a", "value": 1}, {"group": "a", "value": 1}, {"group": "b", "value": 0}], [{"value": 1}, {"value": 0}, {"value": 1}]), {"f1": 50.}) def test_auc(self): self.assertDictClose( metrics.auc([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]), {"auc-roc": 0.75, "auc-pr": 0.8333}, places=4, ) def 
test_auc_non_binary(self): self.assertDictClose( metrics.auc([0.0, 0.2, 0.5, 0.7], [0.1, 0.4, 0.35, 0.8], targets_threshold=0.5), {"auc-roc": 0.75, "auc-pr": 0.8333}, places=4, ) def test_score_auc(self): self.assertDictClose( metrics.score_auc([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]), { "auc-roc": 0.75, "auc-pr": 0.8333 }, places=4, ) def test_score_auc_non_binary(self): self.assertDictClose( metrics.score_auc([0.0, 0.2, 0.5, 0.7], [0.1, 0.4, 0.35, 0.8], targets_threshold=0.5), { "auc-roc": 0.75, "auc-pr": 0.8333 }, places=4, ) def test_sklearn_wrapper(self): mae_fn = metrics.sklearn_metrics_wrapper("mean_absolute_error") y_true = [[0.5, 1], [-1, 1], [7, -6]] y_pred = [[0, 2], [-1, 2], [8, -5]] self.assertDictClose( mae_fn(y_true, y_pred), {"mean_absolute_error": sklearn.metrics.mean_absolute_error(y_true, y_pred)}) hamming_fn = metrics.sklearn_metrics_wrapper( "hamming_loss", metric_dict_str="hamming_100x", metric_post_process_fn=lambda x: 100 * x) y_true = [1, 2, 3, 4] y_pred = [2, 2, 3, 4] self.assertDictClose( hamming_fn(y_true, y_pred), {"hamming_100x": 100 * sklearn.metrics.hamming_loss(y_true, y_pred)}) y_true = [0, 0, 2, 1] y_pred = [0, 1, 2, 1] matthews_corrcoef_fn = metrics.sklearn_metrics_wrapper( "matthews_corrcoef", metric_post_process_fn=lambda x: 100 * x) self.assertDictClose( matthews_corrcoef_fn(y_true, y_pred), {"matthews_corrcoef": 70.}) def test_rank_classification_default_weights(self): # num_classes = 2 self.assertDictClose( metrics.rank_classification( [ # 0 ((0, 0), True, 1.0, 1), ((0, 1), False, 1.0, 1), # 1 ((1, 0), False, 1.0, 1), ((1, 1), True, 1.0, 1), # 0 ((2, 0), True, 1.0, 1), ((2, 1), False, 1.0, 1), # 0 ((3, 0), True, 1.0, 1), ((3, 1), False, 1.0, 1), ], [ 0.1, 0.5, 1.0, 1.1, 0.3, 0.1, 0.6, 0.5 ], num_classes=2), { "accuracy": 75., "auc-pr": 50.0, "auc-roc": 66.6666667, "f1": 66.6666667, }) # num_classes = 3 self.assertDictClose( metrics.rank_classification( [ # 1 ((0, 0), False, 1.0, 1), ((0, 1), True, 1.0, 1), ((0, 2), False, 1.0, 
1), # 0 ((1, 0), True, 1.0, 1), ((1, 1), False, 1.0, 1), ((1, 2), False, 1.0, 1), # 2 ((2, 0), False, 1.0, 1), ((2, 1), False, 1.0, 1), ((2, 2), True, 1.0, 1) ], [ 0.1, 0.5, 0.0, -2, -1, -3, 3.0, 3.1, 3.2 ], num_classes=3), { "accuracy": 66.6666667, "mean_3class_f1": 55.5555556, }) # num_classes = 3, multi-label self.assertDictClose( metrics.rank_classification( [ # 1 ((0, 0), False, 1.0, 1), ((0, 1), True, 1.0, 1), ((0, 2), False, 1.0, 1), # 0, 2 ((1, 0), True, 1.0, 1), ((1, 1), False, 1.0, 1), ((1, 2), True, 1.0, 1), # 1, 2 ((2, 0), False, 1.0, 1), ((2, 1), True, 1.0, 1), ((2, 2), True, 1.0, 1) ], [ 0.1, 0.5, 0.0, -2, -1, -3, 3.0, 3.1, 3.2 ], num_classes=3), { "accuracy": 66.6666667, }) # num_classes = None, multi-answer self.assertDictClose( metrics.rank_classification( [ # 1 ((0, 0), False, 1.0, 1), ((0, 1), True, 1.0, 1), # 0, 3 ((1, 0), True, 1.0, 1), ((1, 1), False, 1.0, 1), ((1, 2), True, 1.0, 1), # 0 ((2, 0), True, 1.0, 1) ], [ 0.1, 0.5, -2, -1, -3, 3.0 ], num_classes=None), { "accuracy": 66.6666667, }) def test_rank_classification_custom_weights(self): # num_classes = 2 self.assertDictClose( metrics.rank_classification( [ # 0 ((0, 0), True, 0.2, 1), ((0, 1), False, 0.2, 1), # 1 ((1, 0), False, 1.0, 1), ((1, 1), True, 1.0, 1), # 0 ((2, 0), True, 0.8, 1), ((2, 1), False, 0.8, 1), # 0 ((3, 0), True, 0.5, 1), ((3, 1), False, 0.5, 1), ], [ 0.1, 0.5, 1.0, 1.1, 0.3, 0.1, 0.6, 0.5 ], num_classes=2), { "accuracy": 92.0, "auc-pr": 83.3333333, "auc-roc": 86.6666667, "f1": 90.9090909, }) # num_classes = 3 self.assertDictClose( metrics.rank_classification( [ # 1 ((0, 0), False, 0.2, 1), ((0, 1), True, 0.2, 1), ((0, 2), False, 0.2, 1), # 0 ((1, 0), True, 0.5, 1), ((1, 1), False, 0.5, 1), ((1, 2), False, 0.5, 1), # 2 ((2, 0), False, 1.0, 1), ((2, 1), False, 1.0, 1), ((2, 2), True, 1.0, 1) ], [ 0.1, 0.5, 0.0, -2, -1, -3, 3.0, 3.1, 3.2 ], num_classes=3), { "accuracy": 70.5882353, "mean_3class_f1": 48.1481481, }) # num_classes = None, multi-answer self.assertDictClose( 
metrics.rank_classification( [ # 1 ((0, 0), False, 0.2, 1), ((0, 1), True, 0.2, 1), # 0, 3 ((1, 0), True, 0.5, 1), ((1, 1), False, 0.5, 1), ((1, 2), True, 0.5, 1), # 1 ((2, 0), True, 1.0, 1) ], [ 0.1, 0.5, -2, -1, -3, 3.0 ], num_classes=None), { "accuracy": 70.5882353, }) def test_rank_classification_shuffled(self): # num_classes = 2 self.assertDictClose( metrics.rank_classification( [ ((3, 0), True, 0.5, 1), ((0, 0), True, 0.2, 1), ((1, 0), False, 1.0, 1), ((1, 1), True, 1.0, 1), ((2, 0), True, 0.8, 1), ((2, 1), False, 0.8, 1), ((3, 1), False, 0.5, 1), ((0, 1), False, 0.2, 1), ], [ 0.6, 0.1, 1.0, 1.1, 0.3, 0.1, 0.5, 0.5, ], num_classes=2), { "accuracy": 92.0, "auc-pr": 83.3333333, "auc-roc": 86.6666667, "f1": 90.9090909, }) # num_classes = 3 self.assertDictClose( metrics.rank_classification( [ ((0, 0), False, 0.2, 1), ((2, 1), False, 1.0, 1), ((0, 1), True, 0.2, 1), ((1, 0), True, 0.5, 1), ((1, 1), False, 0.5, 1), ((1, 2), False, 0.5, 1), ((0, 2), False, 0.2, 1), ((2, 0), False, 1.0, 1), ((2, 2), True, 1.0, 1) ], [ 0.1, 3.1, 0.5, -2, -1, -3, 0.0, 3.0, 3.2 ], num_classes=3), { "accuracy": 70.5882353, "mean_3class_f1": 48.1481481, }) # num_classes = None, multi-answer self.assertDictClose( metrics.rank_classification( [ ((0, 0), False, 0.2, 1), ((2, 0), True, 1.0, 1), ((0, 1), True, 0.2, 1), ((1, 2), True, 0.5, 1), ((1, 0), True, 0.5, 1), ((1, 1), False, 0.5, 1), ], [ 0.1, 3.0, 0.5, -3, -2, -1, ], num_classes=None), { "accuracy": 70.5882353, }) def test_rank_classification_normalized(self): # num_classes = 2 self.assertDictClose( metrics.rank_classification( [ # 0 ((0, 0), True, 1.0, 5), ((0, 1), False, 1.0, 10), # 1 ((1, 0), False, 1.0, 2), ((1, 1), True, 1.0, 3), # 0 ((2, 0), True, 1.0, 5), ((2, 1), False, 1.0, 6), # 0 ((3, 0), True, 1.0, 3), ((3, 1), False, 1.0, 2), ], [ 0.5, 5.0, 2.0, 3.3, 1.5, 0.6, 1.8, 1.0 ], num_classes=2, normalize_by_target_length=True,), { "accuracy": 75., "auc-pr": 50.0, "auc-roc": 66.6666667, "f1": 66.6666667, }) def 
test_rank_classification_raise(self): with self.assertRaisesWithLiteralMatch( ValueError, "`targets` should contain 4 elements but has 2."): metrics.rank_classification( [ ((0, 0), True), ((0, 1), True), ], [ 0.1, 0.5 ], num_classes=2) with self.assertRaisesWithLiteralMatch( ValueError, "The first element of `targets` ('idx') should be 2-dimensional. Got " "0."): metrics.rank_classification( [ (0, True, 1.0, 1), (0, True, 1.0, 1), ], [ 0.1, 0.5 ], num_classes=2) def test_coqa_tokenize(self): self.assertEqual(metrics._coqa_tokenize("Maru the cat"), ["maru", "cat"]) self.assertEqual(metrics._coqa_tokenize("Maru cat"), ["maru", "cat"]) self.assertEqual(metrics._coqa_tokenize("Maru the cat."), ["maru", "cat"]) def test_sequence_f1(self): self.assertEqual(metrics._sequence_f1([], []), 1.0) self.assertEqual(metrics._sequence_f1([], ["cat"]), 0.0) self.assertEqual(metrics._sequence_f1(["cat"], []), 0.0) self.assertEqual(metrics._sequence_f1(["dog"], ["cat"]), 0.0) self.assertAlmostEqual(metrics._sequence_f1(["cat", "dog"], ["cat"]), 2 / 3) self.assertAlmostEqual(metrics._sequence_f1(["cat"], ["cat", "dog"]), 2 / 3) def test_coqa_f1(self): self.assertDictClose( metrics.coqa_f1([["jump box"], ["maru"]], ["jump", "cat"]), {"f1": 1 / 3 * 100}) self.assertDictClose( metrics.coqa_f1([["jump the box"], ["maru"]], ["jump", "cat"]), {"f1": 1 / 3 * 100}) self.assertDictClose( metrics.coqa_f1([["jump the box", "climb box"]], ["jump box"]), {"f1": 100}) def test_edit_distance(self): results = metrics.edit_distance( ["This is a sentence."], ["This is a different SENTENCE."]) self.assertDictClose( results, { "max_edit": 1, "mean_edit": 1.0, "median_edit": 1.0, "min_edit": 1, "sum_edit": 1 }) results = metrics.edit_distance( ["This is a sentence."], ["This is a different SENTENCE."], lower=False) self.assertDictClose( results, { "max_edit": 2, "mean_edit": 2.0, "median_edit": 2.0, "min_edit": 2, "sum_edit": 2 }) results = metrics.edit_distance( ["Non-ascii separate."], 
["Non-ascii🙂separate."], lower=False) self.assertDictClose( results, { "max_edit": 0, "mean_edit": 0.0, "median_edit": 0.0, "min_edit": 0, "sum_edit": 0 }) if __name__ == "__main__": absltest.main()
google-research/text-to-text-transfer-transformer
t5/evaluation/metrics_test.py
Python
apache-2.0
19,933
[ "MOOSE" ]
7d527d6def8b7a66b9a74fa2703e49d44bc5bf7b65dd15d631ba8c09e998016c
import numpy as np def cnn_setup(params): """ Defining convolutional architecture. Arguments: Parameters defining the entire model. """ class cnn_layer(): def __init__(self, layerType, dimFilters, nFilters, stride, border='valid', doNoise = True, doBN = True): self.type = layerType self.filter = dimFilters self.maps = nFilters self.stride = stride self.border = border self.noise = doNoise self.bn = doBN cl1 = cnn_layer('conv', (3, 3), (3, 96), (1, 1), 'valid') cl2 = cnn_layer('conv', (3, 3), (96, 96), (1, 1), 'full') cl2alt = cnn_layer('conv', (3, 3), (96, 96), (1, 1), 'full') cl3 = cnn_layer('pool', (2, 2), (96, 96), (2, 2), 'dummy', 1, 1) # dummy is not used cl3alt = cnn_layer('conv', (3, 3), (96, 96), (2, 2), 'valid') # ? stride = 2? cl4 = cnn_layer('conv', (3, 3), (96, 192), (1, 1), 'valid') cl5 = cnn_layer('conv', (3, 3), (192, 192), (1, 1), 'full') cl6 = cnn_layer('conv', (3, 3), (192, 192), (1, 1), 'valid') cl7 = cnn_layer('pool', (2, 2), (192, 192), (2, 2), 'dummy', 1, 1) cl7alt = cnn_layer('conv', (3, 3), (192, 192), (2, 2),'valid') # ? stride = 2? cl8 = cnn_layer('conv', (3, 3), (192, 192), (1, 1), 'valid') cl9 = cnn_layer('conv', (1, 1), (192, 192), (1, 1), 'valid') cl10 = cnn_layer('conv', (1, 1), (192, 10), (1, 1), 'valid', 0, 0) cl11 = cnn_layer('average+softmax', (6, 6), (10, 10), (6, 6), 'dummy', 0, 0) cl11alt = cnn_layer('average', (6, 6), (10, 10), (6, 6), 0, 0, 0) cl12alt = cnn_layer('softmax', (6, 6), (10, 10), (1, 1), 0, 0, 0) if params.dataset == 'mnist': cl1 = cnn_layer('conv', (3, 3), (1, 96), (1, 1)) if params.cnnType == 'all_conv': cnn_layers = [cl1, cl2, cl3alt, cl4, cl5, cl7alt, cl8, cl9, cl10, cl11] elif params.cnnType == 'ladder_baseline': cnn_layers = [cl1, cl2, cl2alt, cl3, cl4, cl5, cl6, cl7, cl8, cl9, cl10, cl11] else: cnn_layers = [cl1, cl2, cl3, cl4, cl5, cl7, cl8, cl9, cl10, cl11] return cnn_layers def setup(replace_params=None): ''' Defining entire neural network model and methods for training. 
Arguments: Dictionary of the form {'paramName': paramValue}. E.g. replace_params = {'useT2': False, 'learnRate1': 0.1} ''' ones = np.ones(20) class Params(): def __init__(self, opt): print(opt) # general setting self.verbose = opt.verbose self.model = opt.model self.dataset = opt.dataset self.batchSize1 = 128 self.batchSize2 = 128 self.maxEpoch = 1 self.seed = 1234 self.cnnType = 'ladder_baseline' self.cnnNonlin = 'leaky_relu' # meta-backward self.meta_bw = True self.useVal = 0 self.saveName = 'result.pkl' # PREPROCESSING self.predata = opt.predata self.predataName = self.dataset + '_preprocess.npz' self.ratioHyper = 0.2 # elementary set : hyper set self.ratioValid = opt.ratioValid # how much of T2 goes to validatio set self.preProcess = 'global_contrast_norm' # what input preprocessing? 'None'/'m0'/'m0s1'/'minMax'/'pca'/'global_contrast_norm'/'zca'/'global_contrast_norm+zca' self.preContrast = 'None' # nonlinear transform over input? 'None'/'tanh'/'arcsinh'/'sigmoid' # ARCHITECTURE self.nHidden = [784, 1000, 1000, 1000, 10] # how many hidden units in each layer? self.activation = ['relu','relu','relu','softmax'] # what nonlinearities in each layer? self.nLayers = len(self.nHidden)-1 # how many layers are there? # BATCH NORMALIZATION self.batchNorm = False # use batch normalization? self.aFix = True # fix scalling parameter? self.movingAvMin = 0.10 # moving average paramerer? [0.05-0.20] self.movingAvStep = 1 # moving average step size? self.evaluateTestInterval = 25 # how often compute the "exact" BN parameters? i.e. replacing moving average with the estimate from the whole training data self.m = 550 # when computing "exact" BN parameters, average over how many samples from training set? self.testBN = 'default' # when computing "exact" BN parameters, how? 'default'/'proper'/'lazy' self.poolBNafter = False # REGULARIZATION self.rglrzTrain = ['L2'] # which rglrz are trained? (which are available? see: rglrzInitial) self.rglrz = ['L2'] # which rglrz are used? 
self.rglrzPerUnit = [] # which rglrz are defined per hidden unit? (default: defined one per layer) self.rglrzPerMap = [] # which rglrz are defined per map? (for convnets) self.rglrzPerNetwork = [] # which rglrz are defined per network? self.rglrzPerNetwork1 = [] # which rglrz are defined per network? BUT have a separate param for the first layer self.rglrzInitial = {'L1': 0.*ones, # initial values of rglrz 'L2': 0.001*ones, 'LmaxCutoff': 0.*ones, # soft cutoff param1 'LmaxSlope': 0.*ones, # soft cutoff param2 'LmaxHard': 2.*ones, # hard cutoff aka maxnorm 'addNoise' : 0.3*ones, 'inputNoise' : [0.], # only input noise (if trained, need be PerNetwork) 'dropOut': [0.2]+20*[0.5], 'dropOutB': [0.2]+20*[0.5]} # shared dropout pattern within batch self.rglrzLR = {'L1': 0.0001, # scaling factor for learning rate: corresponds to hyperparameters (expected) order of magnitude 'L2': 0.001, 'LmaxCutoff': 0.1, 'LmaxSlope': 0.0001, 'addNoise' : 1., 'inputNoise' : 1.} # REGULARIZATION: noise specific self.noiseupSoftmax = False # is there noise in the softmax layer? self.noiseWhere = 'type1' # where is noise added at input? 'type0' - after non-linearity, 'type1' - before non-linearity self.noiseT1 = 'None' # type of gaussian noise? 'None'/'multi0'/'multi1'/'fake_drop' --> (x+n)/x*n/x*(n+1)/x*s(n) # TRAINING: COST self.cost = 'categorical_crossentropy' # cost for T1? 'L2'/'categorical_crossentropy' self.cost_T2 = 'categorical_crossentropy' # TODO # cost for T2? 'L2'/'crossEntropy' TODO: 'sigmoidal'/'hingeLoss' self.penalize_T2 = False # apply penalty for T2? self.cost2Type = 'default' # type of T1T2 cost 'default'/'C2-C1' # TRAINING: T2 FD or exact self.finiteDiff = False # use finite difference for T2? 
self.FDStyle = '3' # type of finite difference implementation '2'/'3' self.checkROP = False # TODO # check ROP operator efficiency self.T2gradDIY = False # TODO # use your own ROP operator self.T2onlySGN = False # consider only the sign for T2 update, not the amount # TRAINING: OPTIMIZATION self.learnRate1 = 0.002 # T1 max step size self.learnRate2 = 0.001 # T2 max step size self.learnFun1 = 'olin' # learning rate schedule for T1? (see LRFunctions for options) self.learnFun2 = 'None' # learning rate schedule for T2? self.opt1 = 'adam' # optimizer for T1? 'adam'/None (None is SGD) self.opt2 = 'adam' # optimizer for T2? 'adam'/None (None is SGD) self.use_momentum = False # applies both to T1 and T2, set the terms to 0 for either if want to disable for one self.momentum1 = [0.5, 0.9] # T1 max and min momentum values self.momentum2 = [0.5, 0.9] # T2 max and min momentum values self.momentFun = 'exp' # momentum decay function self.halfLife = 1 # decay function parameter, set to be at halfLife*10,000 updates later self.triggerT2 = 0. # when to start training with T2 self.hessian_T2 = False # apply penalty for T2? self.avC2grad = 'None' # taking averaging of C2grad and how? 'None'/'adam'/'momentum' self.decayT2 = 0. # decay factor for T2 params self.MM = 1 # TODO? # for stochastic net: how many parallel samples do we take? IMPORTANCE: could be used to train discrete hyperparameters as well # TRAINING: OTHER # TRACKING, PRINTING self.trackPerEpoch = 1 # how often within epoch track error? self.printInterval = 10 # how often print error? self.printBest = 40000 # each updates print best value? self.activTrack = ['mean', 'std', 'max', # what network statistics are you following? 'const', 'spars', 'wmean', 'wstd', 'wmax', 'rnoise', 'rnstd', 'bias', 'a'] self.forVideo = ['a', 'b', 'h'] # takes a sample of say 100-200 of those from each layer self.showGrads = False # do you show gradient values? 
# self.listGrads = ['grad', 'grad_rel', 'grad_angle', 'grad_max', 'p_t', 'p_t_rel', 'p_t_angle', 'p_t_max'] self.listGrads = ['grad', 'grad_angle', 'p_t', 'p_t_angle'] # which gradient measures to track? self.trackGrads = False # monitor gradients during training? self.trackStats = False # monitor layer and weight statistics during training? self.track4Vid = False # TODO: monitor for creating animation self.track1stFeatures = False # TODO: monitor 1st layer features # replace default parameters params = Params(replace_params) # for key, val in replace_params.iteritems(): # assert hasattr(params, key), 'Setting %s does not exist' % key # setattr(params, key, val) # additional parameters if convolutional network if params.model == 'convnet': params.convLayers = cnn_setup(params) params.nLayers = len(params.convLayers) else: assert len(params.nHidden) == len(params.activation)+1 params.nLayers = len(params.nHidden)-1 # change dimensions for cifar-10 and svhn if params.dataset in ['cifar10', 'svhn']: params.nHidden[0] = 3*1024 if (not params.meta_bw) or (params.rglrz == []): params.rglrzTrain = [] params.meta_bw = False return params
bigaidream-projects/drmad
gpu_ver/refcode/setup.py
Python
mit
13,270
[ "Gaussian" ]
cadd85567df2939aa67d38d38bdac6a85acff35ee6c90af9cd96c9a29030a855
#coding=utf8 from __future__ import unicode_literals import sys import os import shutil import re import xlrd import yaml import alias import typedef import lseri reload(sys) sys.setdefaultencoding("utf8") import codecs def cp65001(name): if name.lower() == "cp65001": return codecs.lookup("utf8") codecs.register(cp65001) g_error_l = [] g_alias_d = None g_alias_deps = None g_struct_deps = {} g_sheet_struct_deps = {} fp_log = None def fprint(msg): global fp_log if not fp_log: fp_log = open("log.txt", "w") fp_log.write(msg) fp_log.write(os.linesep) fp_log.flush() output_list = [] def output(msg): output_list.append(msg) return def flush_output(): s = lseri.tolua(output_list) #fprint(s) sys.stdout.write(s) sys.stdout.flush() def error(msg): s = lseri.tolua({"error":msg}) sys.stdout.write(s) sys.stdout.flush() sys.exit(1) def print_exc(): import traceback traceback.print_exc() # 基础数据类型:int, float, string _bool_d = {"true":True,"false":False,"1":True,"0":False,"1.0":True,"0.0":False} def _conv_bool(s): return _bool_d[str(s).lower()] basictype_convert_tbl = { "int":int, "float":float, "bool":_conv_bool, "string":lambda s:str(s).encode("utf8"), } # 自定义struct g_struct_d = None # 容器数据类型: list<T>, dict<T1, T2> CONTAINER_RE = re.compile("^(list|dict)<(.+?)(?:,\s*(.+?))??>$") #m = CONTAINER_RE.match(u"list<int>") #print(m.groups()) def find_struct_deps(type_s): if type_s in basictype_convert_tbl: return None if type_s in g_struct_d: if type_s in g_struct_deps: return ["s", g_struct_deps[type_s]] else: return None m = CONTAINER_RE.match(type_s) sg = m.groups() if sg[0] == "list": if sg[1] in g_struct_deps: return ["l", g_struct_deps[sg[1]]] elif sg[0] == "dict": if sg[2] in g_struct_deps: return ["d", g_struct_deps[sg[2]]] return None def get_basic_or_struct_cf(s): if s in basictype_convert_tbl: return basictype_convert_tbl[s] if s not in g_struct_d: return None cfg = g_struct_d[s] def cf(cont): if cont == "": return None l = cont.split("|") assert len(l)==len(cfg), cont ret = 
{} for idx, v in enumerate(l): ret[cfg[idx].keys()[0]] = basictype_convert_tbl[cfg[idx].values()[0]](v) return ret return cf def make_convert_func(type_s): cf = get_basic_or_struct_cf(type_s) if cf: return cf m = CONTAINER_RE.match(type_s) sg = m.groups() assert len(sg) == 3, "类型解析错误" if sg[0] == "list": assert not sg[2], "list定义有误" typ = sg[1] f = get_basic_or_struct_cf(typ) assert f, "list元素类型定义有误:%s"%type_s def cf(s): # xls单元格会默认把数字格式转换成float if typ == "int" and isinstance(s, float): s = str(int(s)) return [f(i) for i in s.split()] return cf elif sg[0] == "dict": k_f = basictype_convert_tbl[sg[1]] assert k_f, "dict key类型定义有误%s"%type_s v_f = get_basic_or_struct_cf(sg[2]) assert v_f, "dict value类型定义有误:%s"%type_s def cf(s): d = {} for i in s.split(): l = i.split(":") d[k_f(l[0])] = v_f(l[1]) return d return cf raise Exception("未定义类型:%s"%type_s) TYPE_DEFAULT_RE = re.compile("^default(?:\((.*)\))?$") #m = TYPE_DEFAULT_RE.match("default") #print m.group(2) TAG_KEY_RE = re.compile("^key(?:\((.*)\))?$") type_default_tbl = { "int":0, "float":0.0, "string":"", "bool":False, } def parse_type_tag(ncol, tag_sl, type_s, conv_f): ret = {} def _key_f(): assert ncol == 0, "key必须是第一列" assert type_s == "int" or type_s == "string", "类型:<%s>不能做key"%type_s ret["key"] = True def _ignore_f(): ret["ignore"] = True def _raw_f(): ret["raw"] = True def _key_alias_f(): assert ncol == 1, "key_alias必须是第二列" ret["key_alias"] = True def _index_f(): ret["index"] = True def _defaultnil_f(): ret["default"] = None tag_fs = { #"key":_key_f, "ignore":_ignore_f, "raw":_raw_f, "key_alias":_key_alias_f, "index":_index_f, "defaultnil": _defaultnil_f, } for tag_s in tag_sl: if tag_s in tag_fs: #assert tag_s in tag_fs, "标签填写错误:<%s>"%tag_s tag_fs[tag_s]() continue # default 处理 m = TYPE_DEFAULT_RE.match(tag_s) if m: assert "default" not in ret, "重复设置default" default_val = m.group(1) if not default_val and type_s in type_default_tbl: default_val = type_default_tbl[type_s] else: default_val = conv_f(default_val 
if default_val else "") ret["default"] = default_val continue # key 处理 m = TAG_KEY_RE.match(tag_s) if m: assert ncol == 0, "key必须是第一列" assert type_s == "int" or type_s == "string", "类型:<%s>不能做key"%type_s d = {} key_attr = m.group(1) if key_attr: assert key_attr == "incr", "key的属性只能是incr" assert type_s == "int", "incr key只能是int类型" d["incr"] = True ret["key"] = d continue raise Exception(tag_s) # 容器类型list和dict默认就是空 if get_basic_or_struct_cf(type_s) == None and "default" not in ret: ret["default"] = conv_f("") assert not ("key" in ret and "default" in ret), "key类型不能设置default" assert not ("key" in ret and "ignore" in ret), "key类型不能设置ignore" assert not ("key" in ret and "index" in ret), "key类型不能设置index" return ret def open_xls(filename): return xlrd.open_workbook(filename) # 返回的是下标 def _find_dup_in_list(l): d = {} for n, i in enumerate(l): if i in d: return n d[i] = 1 return -1 def _num2colname(n): def _n2chr(i): return chr(65+i) if n < 26: return _n2chr(n) elif n < 26*27: return _n2chr(n/26-1) + _n2chr(n%26) return str(n) EXPORT_ALL_RE = re.compile("^all(?:-\[(.*)\])?$") def _parse_export(s): if not s: return None, None m = EXPORT_ALL_RE.match(s) if m: exclude = m.group(1) if not exclude: return "all", [] else: return "all", [i.strip() for i in exclude.split(",")] else: return None, None def sheet_to_dict(sheet, alias_d): conv_funcs = [] tags = [] struct_deps_l = [] struct_deps_d = {} alias_deps = alias_d.get("deps", {}) if alias_d else {} export_type = alias_d.get("export") if alias_d else None alias_d = alias_d.get("alias") if alias_d else None try: # 处理第一行,类型 end_col = None for n, i in enumerate(sheet.row_values(0)): end_col = n + 1 # 允许类型列填空,意味该列忽略 if n > 0 and i == "" : end_col = n break type_sl = i.split("#") conv_f = make_convert_func(type_sl[0]) conv_funcs.append(conv_f) tags.append(parse_type_tag(n, type_sl[1:], type_sl[0], conv_f)) struct_deps_l.append(find_struct_deps(type_sl[0])) except Exception, e: raise Exception("sheet:<%s>类型行%s列填写错误, 内容:<%s>, 
msg:%s"%(sheet.name, _num2colname(n), i, e)) name_row = sheet.row_values(1,end_colx=end_col) dup_idx = _find_dup_in_list(name_row) if dup_idx != -1: raise Exception("sheet:<%s>列名重复:<%s>"%(sheet.name, name_row[dup_idx])) col_names = [] if alias_d: export_flag, export_exmsg = _parse_export(export_type) export_all = export_flag == "all" for name in name_row: col_names.append(alias_d.get(name, name if export_all and name not in export_exmsg else None)) else: col_names = name_row for n, i in enumerate(col_names): if i and struct_deps_l[n]: struct_deps_d[i] = struct_deps_l[n] check_tag_f = lambda x,s: True if s in x else False raw_flag = check_tag_f(tags[0],"raw") raw_keys = {} key_flag = check_tag_f(tags[0],"key") key_incr_flag = check_tag_f(tags[0]["key"], "incr") if key_flag else False last_key = [0,] key_alias_flag = check_tag_f(tags[1],"key_alias") if len(tags) > 1 else False ret = {} if key_flag and not raw_flag else [] key_alias_d = {} if key_alias_flag else None for nrow in xrange(2, sheet.nrows): row = sheet.row_values(nrow, end_colx=end_col) row_d = {} try: # 注释行忽略 if isinstance(row[0], unicode) and row[0].startswith("//"): continue row_key = None row_key_alias = None for ncol, value in enumerate(row): tag = tags[ncol] col_name = col_names[ncol] if not col_name: # key和key_alias列走流程可以不导出 is_key = key_flag and "key" in tag is_alias = key_alias_flag and "key_alias" in tag if not is_key and not is_alias: continue else: if "index" in tag and col_name not in struct_deps_d: if alias_d and col_name not in alias_deps: raise Exception("%s填写了index但没有定义依赖"%col_name) cv = None if "ignore" in tag: continue # 如果该格子不填,获取的是空串 if value == "" and "default" in tag: cv = tag["default"] else: # raw key 列可以为空 if not raw_flag: assert value != "", "表项为空" cv = conv_funcs[ncol](value) else: if value != "": cv = conv_funcs[ncol](value) if ncol == 0 and "key" in tag: row_key = cv if ncol == 1 and "key_alias" in tag: row_key_alias = cv if col_name: row_d[col_name] = cv def 
_check_key(check_d): # 检查key是否重复 # raw表,key列可能是None,不用检查了 if raw_flag and row_key == None: return assert row_key not in check_d, "key列内容重复, 行:%s,值:%s"%(nrow+1, row_key) check_d[row_key] = row_d if key_incr_flag: assert row_key == last_key[0] + 1, "incr key 不连续:%d"%row_key last_key[0] = row_key if key_alias_flag: assert row_key_alias not in key_alias_d, "key_alias列内容重复, 行:%s,值:%s"%(nrow+1, row_key_alias) key_alias_d[row_key_alias] = row_key if isinstance(ret, dict): _check_key(ret) else: if raw_flag and key_flag: _check_key(raw_keys) ret.append(row_d) except Exception, e: # print_exc() raise Exception("sheet:%s, cell:<行%s-列%s>, %s"%(sheet.name, nrow+1, _num2colname(ncol), e)) return ret, struct_deps_d, key_alias_d def get_alias_conf(fn, shname): if fn in g_alias_d: return g_alias_d[fn] key = alias.make_key(fn, shname) return g_alias_d.get(key) def convert_xls(filename): try: wb = open_xls(filename) ret = {} ext = {} for sheet in wb.sheets(): if sheet.name.startswith("_"): continue if sheet.nrows < 2: continue data, deps_d, key_alias_d = sheet_to_dict(sheet, get_alias_conf(filename, sheet.name)) ret[sheet.name] = data d = {} if len(deps_d) > 0: d["deps"] = deps_d d["typ"] = "l" if type(data) == type([]) else "d" d["key_alias"] = key_alias_d if d["typ"] == "l" and key_alias_d: tmp = {} for k, v in key_alias_d.iteritems(): tmp[v] = k d["key_check"] = tmp ext[sheet.name] = d return ret, ext except Exception, e: # print_exc() error("file:%s, error: %s"%(filename, e)) def run_dir(path): os.chdir(path) files = [] def visit(arg, dirname, names): for name in names: if name.endswith(".xls") and not name.startswith("_"): files.append(os.path.relpath(os.path.join(dirname, name), ".")) os.path.walk(".", visit, None) for fn in files: fn = fn.replace(os.sep, "/") data, ext = convert_xls(fn) out = {} out["filename"] = fn out["data"] = data out["ext"] = ext #if len(deps_d) > 0: #out["struct_deps"] = deps_d output(lseri._tolua(out)) if __name__ == "__main__": fpath = sys.argv[1] 
import platform if platform.system() == "Windows": fpath = fpath.decode("gbk") else: fpath = fpath.decode("utf8") try: alias_raw, g_alias_d, g_alias_deps = \ alias.parse(fpath, sys.argv[2]) if len(alias_raw) > 0: output(lseri._tolua({"alias_fields":alias_raw})) if len(g_alias_deps) > 0: output(lseri._tolua({"alias_deps":g_alias_deps})) g_struct_d = typedef.parse(os.path.join(fpath, "struct.yaml")) g_struct_deps = typedef.parse_deps(os.path.join(sys.argv[2], "struct_deps.yaml"), g_struct_d) if len(g_struct_deps) > 0: output(lseri._tolua({"struct_deps":g_struct_deps})) except Exception, e: error(str(e)) run_dir(fpath) flush_output()
bttscut/xls-converter
converter/xlsparse.py
Python
mit
14,562
[ "VisIt" ]
960d84cb3b28d7fe7b0beb3cd71d3c8bb45ca0aabc5e643b6ca2f4f48b582dc0
""" Tornado-based HTTPs JobStateUpdate service. """ from DIRAC import gLogger from DIRAC.Core.Tornado.Server.TornadoService import TornadoService from DIRAC.WorkloadManagementSystem.Service.JobStateUpdateHandler import JobStateUpdateHandlerMixin sLog = gLogger.getSubLogger(__name__) class TornadoJobStateUpdateHandler(JobStateUpdateHandlerMixin, TornadoService): log = sLog
DIRACGrid/DIRAC
src/DIRAC/WorkloadManagementSystem/Service/TornadoJobStateUpdateHandler.py
Python
gpl-3.0
383
[ "DIRAC" ]
0afe3465e82c5f0f98fe5429d894d7d1dd40836a5da6cee5b11baf6c646ae1ad
from collections import deque def tarjan(g): """ Tarjan's algo for finding strongly connected components in a directed graph Uses two main attributes of each node to track reachability, the index of that node within a component(index), and the lowest index reachable from that node(lowlink). We then perform a dfs of the each component making sure to update these parameters for each node and saving the nodes we visit on the way. If ever we find that the lowest reachable node from a current node is equal to the index of the current node then it must be the root of a strongly connected component and so we save it and it's equireachable vertices as a strongly connected component. Complexity: strong_connect() is called at most once for each node and has a complexity of O(|E|) as it is DFS. Therefore this has complexity O(|V| + |E|) for a graph G = (V, E) """ n = len(g) stack = deque() on_stack = [False for _ in range(n)] index_of = [-1 for _ in range(n)] lowlink_of = index_of[:] def strong_connect(v, index, components): index_of[v] = index # the number when this node is seen lowlink_of[v] = index # lowest rank node reachable from here index += 1 stack.append(v) on_stack[v] = True for w in g[v]: if index_of[w] == -1: index = strong_connect(w, index, components) lowlink_of[v] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: lowlink_of[v] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: component = [] w = stack.pop() on_stack[w] = False component.append(w) while w != v: w = stack.pop() on_stack[w] = False component.append(w) components.append(component) return index components = [] for v in range(n): if index_of[v] == -1: strong_connect(v, 0, components) return components def create_graph(n, edges): g = [[] for _ in range(n)] for u, v in edges: g[u].append(v) return g if __name__ == "__main__": # Test n_vertices = 7 source = [0, 0, 1, 2, 3, 3, 4, 4, 6] target = [1, 3, 2, 0, 1, 
4, 5, 6, 5] edges = [(u, v) for u, v in zip(source, target)] g = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
TheAlgorithms/Python
graphs/tarjans_scc.py
Python
mit
2,652
[ "VisIt" ]
fed4391976988dd4fed5ebc76eb46637fd2afe5a8a9116d7b27a66706d32a611
''' The application programming interface for Pyleoclim @author: fengzhu Created on Jan 31, 2020 ''' from ..utils import tsutils, plotting, mapping, lipdutils, tsmodel, tsbase from ..utils import wavelet as waveutils from ..utils import spectral as specutils from ..utils import correlation as corrutils from ..utils import causality as causalutils from ..utils import decomposition from ..utils import filter as filterutils #from textwrap import dedent import seaborn as sns import matplotlib.pyplot as plt import numpy as np import pandas as pd from tabulate import tabulate from collections import namedtuple from copy import deepcopy from matplotlib.ticker import ScalarFormatter, FormatStrFormatter, MaxNLocator import matplotlib.transforms as transforms from matplotlib import cm from matplotlib import gridspec import matplotlib as mpl #from matplotlib.colors import BoundaryNorm, Normalize import cartopy.crs as ccrs import cartopy.feature as cfeature from tqdm import tqdm from scipy.stats.mstats import mquantiles from scipy import stats from statsmodels.multivariate.pca import PCA import warnings import os import lipd as lpd def pval_format(p, threshold=0.01, style='exp'): ''' Print p-value with proper format when p is close to 0 ''' if p < threshold: if p == 0: if style == 'float': s = '< 0.000001' elif style == 'exp': s = '< 1e-6' else: raise ValueError('Wrong style.') else: n = int(np.ceil(np.log10(p))) if style == 'float': s = f'< {10**n}' elif style == 'exp': s = f'< 1e{n}' else: raise ValueError('Wrong style.') else: s = f'{p:.2f}' return s def dict2namedtuple(d): ''' Convert a dictionary to a namedtuple ''' tupletype = namedtuple('tupletype', sorted(d)) return tupletype(**d) def infer_period_unit_from_time_unit(time_unit): ''' infer a period unit based on the given time unit ''' if time_unit is None: period_unit = None else: unit_group = lipdutils.timeUnitsCheck(time_unit) if unit_group != 'unknown': if unit_group == 'kage_units': period_unit = 'kyrs' else: 
period_unit = 'yrs' else: if time_unit[-1] == 's': period_unit = time_unit else: period_unit = f'{time_unit}s' return period_unit def gen_ts(model, t=None, nt=1000, **kwargs): ''' Generate pyleoclim.Series with timeseries models Parameters ---------- model : str, {'colored_noise', 'colored_noise_2regimes', 'ar1'} the timeseries model to use - colored_noise : colored noise with one scaling slope - colored_noise_2regimes : colored noise with two regimes of two different scaling slopes - ar1 : AR(1) series t : array the time axis nt : number of time points only works if 't' is None, and it will use an evenly-spaced vector with nt points kwargs : dict the keyward arguments for the specified timeseries model Returns ------- ts : `pyleoclim.Series` See also -------- pyleoclim.utils.tsmodel.colored_noise : generate a colored noise timeseries pyleoclim.utils.tsmodel.colored_noise_2regimes : generate a colored noise timeseries with two regimes pyleoclim.utils.tsmodel.gen_ar1_evenly : generate an AR(1) series Examples -------- - AR(1) series .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo # default length nt=1000; default persistence parameter g=0.5 ts = pyleo.gen_ts(model='ar1') g = pyleo.utils.tsmodel.ar1_fit(ts.value) @savefig gen_ar1_t0.png fig, ax = ts.plot(label=f'g={g:.2f}') pyleo.closefig(fig) # use 'nt' to modify the data length ts = pyleo.gen_ts(model='ar1', nt=100) g = pyleo.utils.tsmodel.ar1_fit(ts.value) @savefig gen_ar1_t1.png fig, ax = ts.plot(label=f'g={g:.2f}') pyleo.closefig(fig) # use 'settings' to modify the persistence parameter 'g' ts = pyleo.gen_ts(model='ar1', g=0.9) g = pyleo.utils.tsmodel.ar1_fit(ts.value) @savefig gen_ar1_t2.png fig, ax = ts.plot(label=f'g={g:.2f}') pyleo.closefig(fig) - Colored noise with 1 regime .. 
ipython:: python :okwarning: :okexcept: # default scaling slope 'alpha' is 1 ts = pyleo.gen_ts(model='colored_noise') psd = ts.spectral() # estimate the scaling slope psd_beta = psd.beta_est(fmin=1/50, fmax=1/2) print(psd_beta.beta_est_res['beta']) # visualize @savefig gen_cn_t0.png fig, ax = psd.plot() pyleo.closefig(fig) # modify 'alpha' with 'settings' ts = pyleo.gen_ts(model='colored_noise', alpha=2) psd = ts.spectral() # estimate the scaling slope psd_beta = psd.beta_est(fmin=1/50, fmax=1/2) print(psd_beta.beta_est_res['beta']) # visualize @savefig gen_cn_t1.png fig, ax = psd.plot() pyleo.closefig(fig) - Colored noise with 2 regimes .. ipython:: python :okwarning :okexcept: # default scaling slopes 'alpha1' is 0.5 and 'alpha2' is 2, with break at 1/20 ts = pyleo.gen_ts(model='colored_noise_2regimes') psd = ts.spectral() # estimate the scaling slope psd_beta_lf = psd.beta_est(fmin=1/50, fmax=1/20) psd_beta_hf = psd.beta_est(fmin=1/20, fmax=1/2) print(psd_beta_lf.beta_est_res['beta']) print(psd_beta_hf.beta_est_res['beta']) # visualize @savefig gen_cn2_t0.png fig, ax = psd.plot() pyleo.closefig(fig) # modify the scaling slopes and scaling break with 'settings' ts = pyleo.gen_ts(model='colored_noise_2regimes', alpha1=2, alpha2=1, f_break=1/10) psd = ts.spectral() # estimate the scaling slope psd_beta_lf = psd.beta_est(fmin=1/50, fmax=1/10) psd_beta_hf = psd.beta_est(fmin=1/10, fmax=1/2) print(psd_beta_lf.beta_est_res['beta']) print(psd_beta_hf.beta_est_res['beta']) # visualize @savefig gen_cn2_t1.png fig, ax = psd.plot() pyleo.closefig(fig) ''' if t is None: t = np.arange(nt) tsm = { 'colored_noise': tsmodel.colored_noise, 'colored_noise_2regimes': tsmodel.colored_noise_2regimes, 'ar1': tsmodel.gen_ar1_evenly, } tsm_args = {} tsm_args['colored_noise'] = {'alpha': 1} tsm_args['colored_noise_2regimes'] = {'alpha1': 1/2, 'alpha2': 2, 'f_break': 1/20} tsm_args['ar1'] = {'g': 0.5} tsm_args[model].update(kwargs) v = tsm[model](t=t, **tsm_args[model]) ts = 
Series(time=t, value=v) return ts class Series: ''' pyleoSeries object The Series class is, at its heart, a simple structure containing two arrays y and t of equal length, and some metadata allowing to interpret and plot the series. It is similar to a pandas Series, but the concept was extended because pandas does not yet support geologic time. Parameters ---------- time : list or numpy.array independent variable (t) value : list of numpy.array values of the dependent variable (y) time_unit : string Units for the time vector (e.g., 'years'). Default is 'years' time_name : string Name of the time vector (e.g., 'Time','Age'). Default is None. This is used to label the time axis on plots value_name : string Name of the value vector (e.g., 'temperature') Default is None value_unit : string Units for the value vector (e.g., 'deg C') Default is None label : string Name of the time series (e.g., 'Nino 3.4') Default is None clean_ts : boolean flag set to True to remove the NaNs and make time axis strictly prograde with duplicated timestamps reduced by averaging the values Default is True verbose : bool If True, will print warning messages if there is any Examples -------- In this example, we import the Southern Oscillation Index (SOI) into a pandas dataframe and create a pyleoSeries object. .. 
def __init__(self, time, value, time_name=None, time_unit=None, value_name=None,
             value_unit=None, label=None, clean_ts=True, verbose=False):
    '''Create a Series from a time vector and a value vector of equal length.

    Parameters
    ----------
    time : list or numpy.array
        independent variable (t)
    value : list or numpy.array
        values of the dependent variable (y)
    time_name : str
        name of the time vector (e.g. 'Time', 'Age'); used to label the time axis
    time_unit : str
        units of the time vector (e.g. 'years')
    value_name : str
        name of the value vector (e.g. 'temperature')
    value_unit : str
        units of the value vector (e.g. 'deg C')
    label : str
        name of the time series (e.g. 'Nino 3.4')
    clean_ts : bool
        if True (default), remove NaNs and make the time axis strictly
        prograde, averaging values over duplicated timestamps
    verbose : bool
        if True, print warning messages, if any
    '''
    if clean_ts:
        # tsbase.clean_ts drops NaNs, sorts the time axis and averages duplicates
        value, time = tsbase.clean_ts(np.array(value), np.array(time), verbose=verbose)

    self.time = time
    self.value = value
    self.time_name = time_name
    self.time_unit = time_unit
    self.value_name = value_name
    self.value_unit = value_unit
    self.label = label
    self.clean_ts = clean_ts
    self.verbose = verbose

def convert_time_unit(self, time_unit='years'):
    '''Convert the time unit of the Series object.

    Parameters
    ----------
    time_unit : str
        the target time unit; one of
        {'year', 'years', 'yr', 'yrs',
         'y BP', 'yr BP', 'yrs BP', 'year BP', 'years BP',
         'ky BP', 'kyr BP', 'kyrs BP', 'ka BP', 'ka',
         'my BP', 'myr BP', 'myrs BP', 'ma BP', 'ma'}

    Returns
    -------
    new_ts : pyleoclim.Series
        a copy of the Series with its time axis expressed in `time_unit`

    Raises
    ------
    ValueError
        if `time_unit` is not one of the supported spellings
    '''
    new_ts = self.copy()
    if time_unit is None:
        # no target given: return an unmodified copy
        return new_ts

    # Normalize the requested unit to one of four canonical labels.
    # NOTE: 'ky'/'my' must be matched before the plain 'y BP'/'yr' patterns,
    # since e.g. 'kyr BP' also contains 'yr BP'.
    tu = time_unit.lower()
    if tu.find('ky') >= 0 or tu.find('ka') >= 0:
        time_unit_label = 'ky BP'
    elif tu.find('my') >= 0 or tu.find('ma') >= 0:
        time_unit_label = 'my BP'
    elif (tu.find('y bp') >= 0 or tu.find('yr bp') >= 0 or tu.find('yrs bp') >= 0
            or tu.find('year bp') >= 0 or tu.find('years bp') >= 0):
        time_unit_label = 'yrs BP'
    elif tu.find('yr') >= 0 or tu.find('year') >= 0 or tu.find('yrs') >= 0 or tu.find('years') >= 0:
        time_unit_label = 'yrs'
    else:
        raise ValueError(f"Input time_unit={time_unit} is not supported. Supported input: 'year', 'years', 'yr', 'yrs', 'y BP', 'yr BP', 'yrs BP', 'year BP', 'years BP', 'ky BP', 'kyr BP', 'kyrs BP', 'ka BP', 'my BP', 'myr BP', 'myrs BP', 'ma BP'.")

    def convert_to_years():
        # Express the current time axis in (prograde) calendar years, based
        # on the series' *own* unit; returns None if that unit is unknown.
        if self.time_unit is None:
            return None
        cu = self.time_unit.lower()
        if cu.find('ky') >= 0 or cu.find('ka') >= 0:
            datum, exponent, retrograde = 1950 / 1e3, 3, True
        elif cu.find('my') >= 0 or cu.find('ma') >= 0:
            datum, exponent, retrograde = 1950 / 1e6, 6, True
        elif (cu.find('y bp') >= 0 or cu.find('yr bp') >= 0 or cu.find('yrs bp') >= 0
                or cu.find('year bp') >= 0 or cu.find('years bp') >= 0):
            datum, exponent, retrograde = 1950, 0, True
        else:
            datum, exponent, retrograde = 0, 0, False
        if retrograde:
            return (datum - self.time) * 10**exponent
        return (datum + self.time) * 10**exponent

    # BUGFIX: the original built a dict of already-*called* converters, which
    # (a) recomputed the years conversion up to four times per call, and
    # (b) raised TypeError (1950 - None) whenever self.time_unit was None,
    # regardless of the requested target. Convert once, then rescale.
    time_yrs = convert_to_years()
    if time_yrs is None:
        # current unit unknown: nothing sensible to convert, return copy as-is
        return new_ts

    if time_unit_label == 'yrs':
        new_time = time_yrs
    else:
        # BP targets: datum 1950, then optional kilo/mega rescaling
        scale = {'yrs BP': 1e0, 'ky BP': 1e3, 'my BP': 1e6}[time_unit_label]
        new_time = (1950 - time_yrs) / scale

    dt = np.diff(new_time)
    if any(dt <= 0):
        # retrograde targets flip the axis direction; re-sort to keep it prograde
        new_value, new_time = tsbase.sort_ts(self.value, new_time)
    else:
        new_value = self.copy().value

    new_ts.time = new_time
    new_ts.value = new_value
    new_ts.time_unit = time_unit
    return new_ts
def make_labels(self):
    '''Initialization of plot labels from the series' name/unit metadata.

    Returns
    -------
    time_header : str
        Label for the time axis, e.g. 'Time [years]' or 'time'
    value_header : str
        Label for the value axis, e.g. 'temperature [deg C]' or 'value'
    '''
    # fall back to generic names when metadata is absent
    time_name_str = self.time_name if self.time_name is not None else 'time'
    value_name_str = self.value_name if self.value_name is not None else 'value'

    if self.value_unit is not None:
        value_header = f'{value_name_str} [{self.value_unit}]'
    else:
        value_header = f'{value_name_str}'

    if self.time_unit is not None:
        time_header = f'{time_name_str} [{self.time_unit}]'
    else:
        time_header = f'{time_name_str}'

    return time_header, value_header

def __str__(self):
    '''Print the series in a table format and return its length.

    NOTE: printing the table is a side effect of __str__ (kept for
    backward compatibility with the original behavior); the returned
    string only reports the series length.

    Returns
    -------
    str
        length of the timeseries
    '''
    time_label, value_label = self.make_labels()
    table = {
        time_label: self.time,
        value_label: self.value,
    }
    # BUGFIX: the original assigned print()'s return value (always None) to
    # an unused variable `msg`; the dead assignment is dropped.
    print(tabulate(table, headers='keys'))
    return f'Length: {np.size(self.time)}'
def stats(self):
    '''Compute basic statistics for the time series, ignoring NaNs.

    Computes the mean, median, min, max, standard deviation, and
    interquartile range of the series values.

    Returns
    -------
    res : dict
        dictionary with keys 'mean', 'median', 'min', 'max', 'std' and
        'IQR', holding the corresponding statistic of the values.
    '''
    # tsutils.simple_stats returns the six statistics in this exact order
    keys = ('mean', 'median', 'min', 'max', 'std', 'IQR')
    return dict(zip(keys, tsutils.simple_stats(self.value)))
def plot(self, figsize=[10, 4], marker=None, markersize=None, color=None,
         linestyle=None, linewidth=None, xlim=None, ylim=None,
         label=None, xlabel=None, ylabel=None, title=None, zorder=None,
         legend=True, plot_kwargs=None, lgd_kwargs=None, alpha=None,
         savefig_settings=None, ax=None, mute=False, invert_xaxis=False):
    '''Plot the timeseries.

    Parameters
    ----------
    figsize : list
        two integers giving the figure size
    marker, markersize, color, linestyle, linewidth, alpha, zorder :
        matplotlib line-style options; forwarded to ax.plot() only when given
    xlim, ylim : list or tuple
        axis limits
    label : str
        label for the line (defaults to self.label)
    xlabel, ylabel : str
        axis labels (default to the series' metadata labels)
    title : str
        figure title
    legend : bool
        whether to draw a legend
    plot_kwargs : dict
        extra keyword arguments for ax.plot()
    lgd_kwargs : dict
        extra keyword arguments for ax.legend()
    savefig_settings : dict
        arguments for plt.savefig(); 'path' is required to trigger saving
    ax : matplotlib.axis, optional
        axis to draw on; when omitted a new figure is created
    mute : bool
        if True, do not show the plot (going to be deprecated)
    invert_xaxis : bool
        if True, invert the x-axis of the plot

    Returns
    -------
    fig, ax (or ax only when an axis was passed in); see
    pyleoclim.utils.plotting.plot_xy for details.
    '''
    # default axis labels from the series metadata
    time_label, value_label = self.make_labels()
    xlabel = time_label if xlabel is None else xlabel
    ylabel = value_label if ylabel is None else ylabel

    kwargs = {} if plot_kwargs is None else plot_kwargs.copy()

    if label is None:
        label = self.label

    # forward only the style options that were explicitly given
    for key, val in [('label', label), ('marker', marker),
                     ('markersize', markersize), ('color', color),
                     ('linestyle', linestyle), ('linewidth', linewidth),
                     ('alpha', alpha), ('zorder', zorder)]:
        if val is not None:
            kwargs[key] = val

    return plotting.plot_xy(
        self.time, self.value,
        figsize=figsize, xlabel=xlabel, ylabel=ylabel, title=title,
        savefig_settings=savefig_settings, ax=ax, legend=legend,
        xlim=xlim, ylim=ylim, plot_kwargs=kwargs, lgd_kwargs=lgd_kwargs,
        mute=mute, invert_xaxis=invert_xaxis,
    )
def ssa(self, M=None, nMC=0, f=0.3, trunc=None, var_thresh=80):
    '''Singular Spectrum Analysis.

    Nonparametric, orthogonal decomposition of the timeseries into
    constituent oscillations. Optionally (nMC > 0), the significance of the
    eigenvalues is assessed by Monte-Carlo simulations of an AR(1) model
    fit to the series. The method expects regular spacing but tolerates
    missing values up to a fraction 0 < f < 1.

    Parameters
    ----------
    M : int, optional
        window size. Default is None (10% of the length of the series).
    nMC : int, optional
        number of iterations in the Monte-Carlo significance test.
        Default is 0 (no significance testing).
    f : float, optional
        maximum allowable fraction of missing values. Default is 0.3.
    trunc : str
        if present, truncates the expansion to a level K < M owing to one of
        three criteria: 'kaiser' (eigenvalues larger than the median),
        'mcssa' (modes above the 95% Monte-Carlo threshold), or 'var'
        (first K modes explaining at least var_thresh % of the variance).
        Default is None, which bypasses truncation (K = M).
    var_thresh : float
        variance threshold for reconstruction (only used when trunc='var')

    Returns
    -------
    res : SsaRes
        object holding eigvals, eigvecs, PC, RCmat, RCseries, pctvar and,
        if nMC > 0, eigvals_q (5%/95% quantiles of the Monte-Carlo
        eigenvalue spectrum).
    '''
    res = decomposition.ssa(self.value, M=M, nMC=nMC, f=f, trunc=trunc,
                            var_thresh=var_thresh)
    resc = SsaRes(name=self.value_name, original=self.value, time=self.time,
                  eigvals=res['eigvals'], eigvecs=res['eigvecs'],
                  pctvar=res['pctvar'], PC=res['PC'], RCmat=res['RCmat'],
                  RCseries=res['RCseries'], mode_idx=res['mode_idx'])
    # BUGFIX: the Monte-Carlo eigenvalue quantiles only exist when a
    # Monte-Carlo SSA was actually run; the original tested `nMC >= 0`,
    # which is always true for the valid range of nMC.
    if nMC > 0:
        resc.eigvals_q = res['eigvals_q']  # eigenvalue quantiles from MC-SSA
    return resc

def is_evenly_spaced(self, tol=1e-3):
    '''Check if the Series time axis is evenly spaced, within tolerance.

    Parameters
    ----------
    tol : float
        tolerance on the spacing (see pyleoclim.utils.tsbase.is_evenly_spaced)

    Returns
    -------
    res : bool
    '''
    return tsbase.is_evenly_spaced(self.time, tol)
def filter(self, cutoff_freq=None, cutoff_scale=None, method='butterworth', **kwargs):
    '''Filtering methods for Series objects.

    Four methods are available: 'butterworth' (default), 'savitzky-golay',
    'firwin' (FIR, window method) and 'lanczos' (zero-phase lowpass).
    By default this implements a lowpass filter; passing a two-element
    cutoff turns it into a bandpass (Butterworth/firwin only).

    Parameters
    ----------
    cutoff_freq : float or list
        a float is a low-frequency cutoff (lowpass); a list is a frequency
        band (f1, f2) with f1 < f2 (bandpass; Butterworth/firwin only)
    cutoff_scale : float or list
        cutoff_freq = 1 / cutoff_scale; only used when cutoff_freq is None
    method : str, {'savitzky-golay', 'butterworth', 'firwin', 'lanczos'}
        the filtering method
    kwargs : dict
        extra keyword arguments for the chosen filter implementation

    Returns
    -------
    new : pyleoclim.Series
        a copy of the Series with filtered values

    Raises
    ------
    ValueError
        if the time axis is not evenly spaced, if no cutoff is given, or
        if the cutoff specification is malformed for the chosen method
    '''
    if not self.is_evenly_spaced():
        raise ValueError('This method assumes evenly-spaced timeseries, while the input is not. Use the ".interp()", ".bin()" or ".gkernel()" methods prior to ".filter()".')

    new = self.copy()
    mu = np.mean(self.value)  # extract the mean; restored after filtering
    y = self.value - mu
    fs = 1 / np.mean(np.diff(self.time))  # sampling frequency

    method_func = {
        'savitzky-golay': filterutils.savitzky_golay,
        'butterworth': filterutils.butterworth,
        'firwin': filterutils.firwin,
        'lanczos': filterutils.lanczos,
    }

    args = {}
    if method in ['butterworth', 'firwin', 'lanczos']:
        if cutoff_freq is None:
            if cutoff_scale is None:
                raise ValueError('Please set the cutoff frequency or scale argument: "cutoff_freq" or "cutoff_scale".')
            if np.isscalar(cutoff_scale):
                cutoff_freq = 1 / cutoff_scale
            elif len(cutoff_scale) == 2 and method in ['butterworth', 'firwin']:
                # band limits: invert and sort so that f1 < f2
                cutoff_scale = np.array(cutoff_scale)
                cutoff_freq = list(np.sort(1 / cutoff_scale))
            elif len(cutoff_scale) > 1 and method == 'lanczos':
                raise ValueError('Lanczos filter requires a scalar input as cutoff scale/frequency')
            else:
                raise ValueError('Wrong cutoff_scale; should be either one float value (lowpass) or a list two float values (bandpass).')

        # assign optional arguments
        args['butterworth'] = {'fc': cutoff_freq, 'fs': fs}
        args['firwin'] = {'fc': cutoff_freq, 'fs': fs}
        args['lanczos'] = {'fc': cutoff_freq, 'fs': fs}
    else:  # for Savitzky-Golay only
        # BUGFIX: the original condition `cutoff_scale and cutoff_freq is None`
        # never fired when both arguments were omitted (`None and ...` is
        # falsy), so the intended ValueError was replaced by a TypeError at
        # int(None*fs). The intent is clearly a both-None check.
        if cutoff_scale is None and cutoff_freq is None:
            raise ValueError('No cutoff_scale or cutoff_freq argument provided')
        elif cutoff_freq is not None:
            cutoff_scale = 1 / cutoff_freq

        window_length = int(cutoff_scale * fs)
        if window_length % 2 == 0:
            window_length += 1  # window length needs to be an odd integer
        args['savitzky-golay'] = {'window_length': window_length}

    args[method].update(kwargs)
    new.value = method_func[method](y, **args[method]) + mu  # restore the mean
    return new
def distplot(self, figsize=[10, 4], title=None, savefig_settings=None, ax=None,
             ylabel='KDE', vertical=False, edgecolor='w', mute=False, **plot_kwargs):
    '''Plot the distribution of the timeseries values.

    Parameters
    ----------
    figsize : list
        two integers giving the figure size
    title : str
        the title for the figure
    savefig_settings : dict
        arguments for plt.savefig(); 'path' is required to trigger saving
    ax : matplotlib.axis, optional
        axis to draw on; when omitted a new figure is created
    ylabel : str
        label for the count axis
    vertical : bool
        if True, flip the plot vertically
    edgecolor : matplotlib.color
        color of the bar edges
    mute : bool
        if True, the plot will not show (going to be deprecated)
    plot_kwargs : dict
        extra keyword arguments for seaborn histplot

    Returns
    -------
    fig, ax when a new figure was created; ax only when an axis was passed in.

    See also
    --------
    pyleoclim.utils.plotting.savefig : saving figure in Pyleoclim
    '''
    savefig_settings = {} if savefig_settings is None else savefig_settings.copy()

    created_fig = ax is None
    if created_fig:
        fig, ax = plt.subplots(figsize=figsize)

    time_label, value_label = self.make_labels()

    if vertical:
        # wrap the values in a DataFrame so seaborn can draw them vertically
        frame = pd.DataFrame({'value': self.value})
        ax = sns.histplot(data=frame, y="value", ax=ax, kde=True,
                          edgecolor=edgecolor, **plot_kwargs)
        ax.set_ylabel(value_label)
        ax.set_xlabel(ylabel)
    else:
        ax = sns.histplot(self.value, ax=ax, kde=True,
                          edgecolor=edgecolor, **plot_kwargs)
        ax.set_xlabel(value_label)
        ax.set_ylabel(ylabel)

    if title is not None:
        ax.set_title(title)

    if created_fig:
        if 'path' in savefig_settings:
            plotting.savefig(fig, settings=savefig_settings)
        return fig, ax
    return ax
Otherwise it will be calculated based on specifications in psd_kwargs. scalogram : Scalogram the Scalogram object of a Series. If None, will be calculated. This process can be slow as it will be using the WWZ method. If the passed scalogram object contains stored signif_scals (see pyleo.Scalogram.signif_test() for details) these will be flexibly reused as a function of the value of n_signif_test in the summary plot. figsize : list a list of two integers indicating the figure size title : str the title for the figure time_lim : list or tuple the limitation of the time axis. This is for display purposes only, the scalogram and psd will still be calculated using the full time series. value_lim : list or tuple the limitation of the value axis of the timeseries. This is for display purposes only, the scalogram and psd will still be calculated using the full time series. period_lim : list or tuple the limitation of the period axis psd_lim : list or tuple the limitation of the psd axis n_signif_test=None : int Number of Monte-Carlo simulations to perform for significance testing. Default is None. If a scalogram is passed it will be parsed for significance testing purposes. 
time_label : str the label for the time axis value_label : str the label for the value axis of the timeseries period_label : str the label for the period axis psd_label : str the label for the amplitude axis of PDS wavelet_method : str the method for the calculation of the scalogram, see pyleoclim.core.ui.Series.wavelet for details wavelet_kwargs : dict arguments to be passed to the wavelet function, see pyleoclim.core.ui.Series.wavelet for details psd_method : str the method for the calculation of the psd, see pyleoclim.core.ui.Series.spectral for details psd_kwargs : dict arguments to be passed to the spectral function, see pyleoclim.core.ui.Series.spectral for details ts_plot_kwargs : dict arguments to be passed to the timeseries subplot, see pyleoclim.core.ui.Series.plot for details wavelet_plot_kwargs : dict arguments to be passed to the scalogram plot, see pyleoclim.core.ui.Scalogram.plot for details psd_plot_kwargs : dict arguments to be passed to the psd plot, see pyleoclim.core.ui.PSD.plot for details Certain psd plot settings are required by summary plot formatting. These include: - ylabel - legend - tick parameters These will be overriden by summary plot to prevent formatting errors y_label_loc : float Plot parameter to adjust horizontal location of y labels to avoid conflict with axis labels, default value is -0.15 trunc_series : list or tuple the limitation of the time axis. This will slice the actual time series into one contained within the passed boundaries and as such effect the resulting scalogram and psd objects (assuming said objects are to be generated by summary_plot). preprocess : bool if True, the series will be standardized and detrended using pyleoclim defaults prior to the calculation of the scalogram and psd. The unedited series will be used in the plot, while the edited series will be used to calculate the psd and scalogram. 
savefig_settings : dict the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} mute : bool if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) See also -------- pyleoclim.core.ui.Series.spectral : Spectral analysis for a timeseries pyleoclim.core.ui.Series.wavelet : Wavelet analysis for a timeseries pyleoclim.utils.plotting.savefig : saving figure in Pyleoclim pyleoclim.core.ui.PSD : PSD object pyleoclim.core.ui.MultiplePSD : Multiple PSD object Examples -------- Simple summary_plot with n_signif_test = 1 for computational ease, defaults otherwise. .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1) series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD') fig, ax = series.summary_plot(n_signif_test=1) pyleo.showfig(fig) pyleo.closefig(fig) Summary_plot with pre-generated psd and scalogram objects. Note that if the scalogram contains saved noise realizations these will be flexibly reused. See pyleo.Scalogram.signif_test() for details .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1) series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD') psd = series.spectral(freq_method = 'welch') scalogram = series.wavelet(freq_method = 'welch') fig, ax = series.summary_plot(psd = psd,scalogram = scalogram,n_signif_test=2) pyleo.showfig(fig) pyleo.closefig(fig) Summary_plot with pre-generated psd and scalogram objects from before and some plot modification arguments passed. Note that if the scalogram contains saved noise realizations these will be flexibly reused. See pyleo.Scalogram.signif_test() for details .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1) series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD') psd = series.spectral(freq_method = 'welch') scalogram = series.wavelet(freq_method = 'welch') fig, ax = series.summary_plot(psd = psd,scalogram = scalogram, n_signif_test=2, period_lim = [5,0], ts_plot_kwargs = {'color':'red','linewidth':.5}, psd_plot_kwargs = {'color':'red','linewidth':.5}) pyleo.showfig(fig) pyleo.closefig(fig) ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() fig = plt.figure(figsize=figsize) gs = gridspec.GridSpec(6, 12) gs.update(wspace=0, hspace=0) wavelet_kwargs={} if wavelet_kwargs is None else wavelet_kwargs.copy() wavelet_plot_kwargs={} if wavelet_plot_kwargs is None else wavelet_plot_kwargs.copy() psd_kwargs={} if psd_kwargs is None else psd_kwargs.copy() psd_plot_kwargs={} if psd_plot_kwargs is None else psd_plot_kwargs.copy() ts_plot_kwargs={} if ts_plot_kwargs is None else ts_plot_kwargs.copy() if trunc_series is not None: sub_time 
= [] if trunc_series[0] <= self.time[0] and trunc_series[1] >= self.time[-1]: print('Truncation period encapsulates entire series, continuing with defaults.') else: for i in self.time: if i >= trunc_series[0] and i <= trunc_series[1]: sub_time.append(i) try: self = self.slice(sub_time) except: print('Number of time points in given truncation period is not even. Removing last time point and continuing.') sub_time.pop(-1) self = self.slice(sub_time) ax = {} ax['ts'] = plt.subplot(gs[0:1, :-3]) ax['ts'] = self.plot(ax=ax['ts'], **ts_plot_kwargs) ax['ts'].xaxis.set_visible(False) ax['ts'].get_yaxis().set_label_coords(y_label_loc,0.5) if preprocess: self = self.standardize().detrend() if time_lim is not None: ax['ts'].set_xlim(time_lim) if 'xlim' in ts_plot_kwargs: print('Xlim passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.') if value_lim is not None: ax['ts'].set_ylim(value_lim) if 'ylim' in ts_plot_kwargs: print('Ylim passed to time series plot through exposed argument and key word argument. 
The exposed argument takes precedence and will overwrite relevant key word argument.') ax['scal'] = plt.subplot(gs[1:5, :-3], sharex=ax['ts']) if 'method' in list(wavelet_kwargs.keys()): del wavelet_kwargs['method'] print('Please pass method via exposed wavelet_method argument, exposed argument overrides key word argument') if n_signif_test is None: #If significance testing isn't specified then we either use what we can find or don't do significance testing if scalogram is not None: if getattr(scalogram,'signif_scals',None) is not None: n_signif_test = len(scalogram.signif_scals.scalogram_list) else: n_signif_test = 0 else: n_signif_test = 0 if n_signif_test > 0: #If a scalogram is not passed and significance tests are requested if scalogram is None: scalogram = self.wavelet(method=wavelet_method, **wavelet_kwargs).signif_test(number=n_signif_test, export_scal=True) #If a scalogram is passed, significance tests are requested, and the passed scalogram has some significance tests stored in it elif scalogram is not None: scalogram = scalogram.signif_test(number=n_signif_test, export_scal = True) elif n_signif_test == 0: #If specifically no significance tests are requested we just need to generate a scalogram (unless one has been passed) if scalogram is None: scalogram = self.wavelet(method=wavelet_method, **wavelet_kwargs) if 'cbar_style' not in wavelet_plot_kwargs: wavelet_plot_kwargs.update({'cbar_style':{'orientation': 'horizontal', 'pad': 0.12}}) ax['scal'] = scalogram.plot(ax=ax['scal'], **wavelet_plot_kwargs) ax['scal'].invert_yaxis() ax['scal'].get_yaxis().set_label_coords(y_label_loc,0.5) ax['psd'] = plt.subplot(gs[1:4, -3:], sharey=ax['scal']) if 'method' in list(psd_kwargs.keys()): del psd_kwargs['method'] print('Please pass method via exposed psd_method argument, exposed argument overrides key word argument') #Doing effectively the same thing we did for scalogram but now for the psd object if n_signif_test > 0: if psd is None: if psd_method == 
scalogram.wave_method: psd = self.spectral(method=psd_method,scalogram=scalogram,**psd_kwargs).signif_test(number=n_signif_test,scalogram=scalogram) elif psd_method != scalogram.wave_method: psd = self.spectral(method=psd_method,**psd_kwargs).signif_test(number=n_signif_test) elif psd is not None: if psd_method == scalogram.wave_method: psd = psd.signif_test(number=n_signif_test,scalogram=scalogram) elif psd_method != scalogram.wave_method: psd = psd.signif_test(number=n_signif_test) #At this point n_signif_test will be set to zero or some positive number elif n_signif_test == 0: if psd is None: if psd_method == scalogram.wave_method: psd = self.spectral(method=psd_method,scalogram=scalogram,**psd_kwargs) elif psd_method != scalogram.wave_method: psd = self.spectral(method=psd_method,**psd_kwargs) ax['psd'] = psd.plot(ax=ax['psd'], transpose=True, **psd_plot_kwargs) if period_lim is not None: ax['psd'].set_ylim(period_lim) if 'ylim' in psd_plot_kwargs: print('Ylim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.') ax['psd'].yaxis.set_visible(False) ax['psd'].invert_yaxis() ax['psd'].set_ylabel(None) ax['psd'].tick_params(axis='y', direction='in', labelleft=False) ax['psd'].legend().remove() if psd_lim is not None: ax['psd'].set_xlim(psd_lim) if 'xlim' in psd_plot_kwargs: print('Xlim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument') if title is not None: ax['ts'].set_title(title) if 'title' in ts_plot_kwargs: print('Title passed to time series plot through exposed argument and key word argument. 
The exposed argument takes precedence and will overwrite relevant key word argument.') if value_label is not None: #time_label, value_label = self.make_labels() ax['ts'].set_ylabel(value_label) if 'ylabel' in ts_plot_kwargs: print('Ylabel passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.') if time_label is not None: #time_label, value_label = self.make_labels() ax['scal'].set_xlabel(time_label) if 'xlabel' in wavelet_plot_kwargs: print('Xlabel passed to scalogram plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.') if period_label is not None: #period_unit = infer_period_unit_from_time_unit(self.time_unit) #period_label = f'Period [{period_unit}]' if period_unit is not None else 'Period' ax['scal'].set_ylabel(period_label) if 'ylabel' in wavelet_plot_kwargs: print('Ylabel passed to scalogram plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.') if psd_label is not None: ax['psd'].set_xlabel(psd_label) if 'xlabel' in psd_plot_kwargs: print('Xlabel passed to psd plot through exposed argument and key word argument. 
The exposed argument takes precedence and will overwrite relevant key word argument.') if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax def copy(self): '''Make a copy of the Series object Returns ------- Series A copy of the Series object ''' return deepcopy(self) def clean(self, verbose=False): ''' Clean up the timeseries by removing NaNs and sort with increasing time points Parameters ---------- verbose : bool If True, will print warning messages if there is any Returns ------- Series Series object with removed NaNs and sorting ''' new = self.copy() v_mod, t_mod = tsbase.clean_ts(self.value, self.time, verbose=verbose) new.time = t_mod new.value = v_mod return new def sort(self, verbose=False): ''' Ensure timeseries is aligned to a prograde axis. If the time axis is prograde to begin with, no transformation is applied. Parameters ---------- verbose : bool If True, will print warning messages if there is any Returns ------- Series Series object with removed NaNs and sorting ''' new = self.copy() v_mod, t_mod = tsbase.sort_ts(self.value, self.time, verbose=verbose) new.time = t_mod new.value = v_mod return new def gaussianize(self): ''' Gaussianizes the timeseries Returns ------- new : pyleoclim.Series The Gaussianized series object ''' new = self.copy() v_mod = tsutils.gaussianize(self.value) new.value = v_mod return new def standardize(self): '''Standardizes the series ((i.e. renove its estimated mean and divides by its estimated standard deviation) Returns ------- new : pyleoclim.Series The standardized series object ''' new = self.copy() v_mod = tsutils.standardize(self.value)[0] new.value = v_mod return new def center(self, timespan=None): ''' Centers the series (i.e. renove its estimated mean) Parameters ---------- timespan : tuple or list The timespan over which the mean must be estimated. In the form [a, b], where a, b are two points along the series' time axis. 
Returns ------- tsc : pyleoclim.Series The centered series object ts_mean : estimated mean of the original series, in case it needs to be restored later ''' tsc = self.copy() if timespan is not None: ts_mean = np.nanmean(self.slice(timespan).value) vc = self.value - ts_mean else: ts_mean = np.nanmean(self.value) vc = self.value - ts_mean tsc.value = vc return tsc, ts_mean def segment(self, factor=10): """Gap detection This function segments a timeseries into n number of parts following a gap detection algorithm. The rule of gap detection is very simple: we define the intervals between time points as dts, then if dts[i] is larger than factor * dts[i-1], we think that the change of dts (or the gradient) is too large, and we regard it as a breaking point and divide the time series into two segments here Parameters ---------- ts : pyleoclim Series factor : float The factor that adjusts the threshold for gap detection Returns ------- res : pyleoclim MultipleSeries Object or pyleoclim Series Object If gaps were detected, returns the segments in a MultipleSeries object, else, returns the original timeseries. """ seg_y, seg_t, n_segs = tsutils.ts2segments(self.value,self.time,factor=factor) if len(seg_y)>1: s_list=[] for idx,s in enumerate(seg_y): s_tmp=Series(time=seg_t[idx],value=s,time_name=self.time_name, time_unit=self.time_unit, value_name=self.value_name, value_unit=self.value_unit,label=self.label) s_list.append(s_tmp) res=MultipleSeries(series_list=s_list) elif len(seg_y)==1: res=self.copy() else: raise ValueError('No timeseries detected') return res def slice(self, timespan): ''' Slicing the timeseries with a timespan (tuple or list) Parameters ---------- timespan : tuple or list The list of time points for slicing, whose length must be even. When there are n time points, the output Series includes n/2 segments. 
For example, if timespan = [a, b], then the sliced output includes one segment [a, b]; if timespan = [a, b, c, d], then the sliced output includes segment [a, b] and segment [c, d]. Returns ------- new : Series The sliced Series object. ''' new = self.copy() n_elements = len(timespan) if n_elements % 2 == 1: raise ValueError('The number of elements in timespan must be even!') n_segments = int(n_elements / 2) mask = [False for i in range(np.size(self.time))] for i in range(n_segments): mask |= (self.time >= timespan[i*2]) & (self.time <= timespan[i*2+1]) new.time = self.time[mask] new.value = self.value[mask] return new def fill_na(self, timespan=None, dt=1): ''' Fill NaNs into the timespan Parameters ---------- timespan : tuple or list The list of time points for slicing, whose length must be 2. For example, if timespan = [a, b], then the sliced output includes one segment [a, b]. If None, will use the start point and end point of the original timeseries dt : float The time spacing to fill the NaNs; default is 1. Returns ------- new : Series The sliced Series object. ''' new = self.copy() if timespan is None: start = np.min(self.time) end = np.max(self.time) else: start = timespan[0] end = timespan[-1] new_time = np.arange(start, end+dt, dt) new_value = np.empty(np.size(new_time)) for i, t in enumerate(new_time): if t in self.time: loc = list(self.time).index(t) new_value[i] = self.value[loc] else: new_value[i] = np.nan new.time = new_time new.value = new_value return new def detrend(self, method='emd', **kwargs): '''Detrend Series object Parameters ---------- method : str, optional The method for detrending. The default is 'emd'. Options include: * "linear": the result of a n ordinary least-squares stright line fit to y is subtracted. * "constant": only the mean of data is subtracted. * "savitzky-golay", y is filtered using the Savitzky-Golay filters and the resulting filtered series is subtracted from y. * "emd" (default): Empirical mode decomposition. 
The last mode is assumed to be the trend and removed from the series **kwargs : dict Relevant arguments for each of the methods. Returns ------- new : pyleoclim.Series Detrended Series object See also -------- pyleoclim.utils.tsutils.detrend : detrending wrapper functions Examples -------- We will generate a random signal with a nonlinear trend and use two detrending options to recover the original signal. .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import numpy as np # Generate a mixed harmonic signal with known frequencies freqs=[1/20,1/80] time=np.arange(2001) signals=[] for freq in freqs: signals.append(np.cos(2*np.pi*freq*time)) signal=sum(signals) # Add a non-linear trend slope = 1e-5; intercept = -1 nonlinear_trend = slope*time**2 + intercept # Add a modicum of white noise np.random.seed(2333) sig_var = np.var(signal) noise_var = sig_var / 2 #signal is twice the size of noise white_noise = np.random.normal(0, np.sqrt(noise_var), size=np.size(signal)) signal_noise = signal + white_noise # Place it all in a series object and plot it: ts = pyleo.Series(time=time,value=signal_noise + nonlinear_trend) @savefig random_series.png fig, ax = ts.plot(title='Timeseries with nonlinear trend') pyleo.closefig(fig) # Detrending with default parameters (using EMD method with 1 mode) ts_emd1 = ts.detrend() ts_emd1.label = 'default detrending (EMD, last mode)' @savefig ts_emd1.png fig, ax = ts_emd1.plot(title='Detrended with EMD method') ax.plot(time,signal_noise,label='target signal') ax.legend() pyleo.showfig(fig) pyleo.closefig(fig) # We see that the default function call results in a "Hockey Stick" at the end, which is undesirable. 
# There is no automated way to do this, but with a little trial and error, we find that removing the 2 smoothest modes performs reasonably: ts_emd2 = ts.detrend(method='emd', n=2) ts_emd2.label = 'EMD detrending, last 2 modes' @savefig ts_emd_n2.png fig, ax = ts_emd2.plot(title='Detrended with EMD (n=2)') ax.plot(time,signal_noise,label='target signal') ax.legend() pyleo.showfig(fig) pyleo.closefig(fig) # Another option for removing a nonlinear trend is a Savitzky-Golay filter: ts_sg = ts.detrend(method='savitzky-golay') ts_sg.label = 'savitzky-golay detrending, default parameters' @savefig ts_sg.png fig, ax = ts_sg.plot(title='Detrended with Savitzky-Golay filter') ax.plot(time,signal_noise,label='target signal') ax.legend() pyleo.showfig(fig) pyleo.closefig(fig) # As we can see, the result is even worse than with EMD (default). Here it pays to look into the underlying method, which comes from SciPy. # It turns out that by default, the Savitzky-Golay filter fits a polynomial to the last "window_length" values of the edges. # By default, this value is close to the length of the series. Choosing a value 10x smaller fixes the problem here, though you will have to tinker with that parameter until you get the result you seek. 
ts_sg2 = ts.detrend(method='savitzky-golay',sg_kwargs={'window_length':201}) ts_sg2.label = 'savitzky-golay detrending, window_length = 201' @savefig ts_sg2.png fig, ax = ts_sg2.plot(title='Detrended with Savitzky-Golay filter') ax.plot(time,signal_noise,label='target signal') ax.legend() pyleo.showfig(fig) pyleo.closefig(fig) ''' new = self.copy() v_mod = tsutils.detrend(self.value, x=self.time, method=method, **kwargs) new.value = v_mod return new def spectral(self, method='lomb_scargle', freq_method='log', freq_kwargs=None, settings=None, label=None, scalogram=None, verbose=False): ''' Perform spectral analysis on the timeseries Parameters ---------- method : str {'wwz', 'mtm', 'lomb_scargle', 'welch', 'periodogram'} freq_method : str {'log','scale', 'nfft', 'lomb_scargle', 'welch'} freq_kwargs : dict Arguments for frequency vector settings : dict Arguments for the specific spectral method label : str Label for the PSD object scalogram : pyleoclim.core.ui.Series.Scalogram The return of the wavelet analysis; effective only when the method is 'wwz' verbose : bool If True, will print warning messages if there is any Returns ------- psd : pyleoclim.PSD A :mod:`pyleoclim.PSD` object See also -------- pyleoclim.utils.spectral.mtm : Spectral analysis using the Multitaper approach pyleoclim.utils.spectral.lomb_scargle : Spectral analysis using the Lomb-Scargle method pyleoclim.utils.spectral.welch: Spectral analysis using the Welch segement approach pyleoclim.utils.spectral.periodogram: Spectral anaysis using the basic Fourier transform pyleoclim.utils.spectral.wwz_psd : Spectral analysis using the Wavelet Weighted Z transform pyleoclim.utils.wavelet.make_freq_vector : Functions to create the frequency vector pyleoclim.utils.tsutils.detrend : Detrending function pyleoclim.core.ui.PSD : PSD object pyleoclim.core.ui.MultiplePSD : Multiple PSD object Examples -------- Calculate the spectrum of SOI using the various methods and compute significance .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1) time = data.iloc[:,1] value = data.iloc[:,2] ts = pyleo.Series(time=time, value=value, time_name='Year C.E', value_name='SOI', label='SOI') # Standardize the time series ts_std = ts.standardize() - Lomb-Scargle .. ipython:: python :okwarning: :okexcept: psd_ls = ts_std.spectral(method='lomb_scargle') psd_ls_signif = psd_ls.signif_test(number=20) #in practice, need more AR1 simulations @savefig spec_ls.png fig, ax = psd_ls_signif.plot(title='PSD using Lomb-Scargle method') pyleo.closefig(fig) We may pass in method-specific arguments via "settings", which is a dictionary. For instance, to adjust the number of overlapping segment for Lomb-Scargle, we may specify the method-specific argument "n50"; to adjust the frequency vector, we may modify the "freq_method" or modify the method-specific argument "freq". .. 
ipython:: python :okwarning: :okexcept: import numpy as np psd_LS_n50 = ts_std.spectral(method='lomb_scargle', settings={'n50': 4}) # c=1e-2 yields lower frequency resolution psd_LS_freq = ts_std.spectral(method='lomb_scargle', settings={'freq': np.linspace(1/20, 1/0.2, 51)}) psd_LS_LS = ts_std.spectral(method='lomb_scargle', freq_method='lomb_scargle') # with frequency vector generated using REDFIT method fig, ax = psd_LS_n50.plot( title='PSD using Lomb-Scargle method with 4 overlapping segments', label='settings={"n50": 4}') psd_ls.plot(ax=ax, label='settings={"n50": 3}', marker='o') @savefig spec_ls_n50.png pyleo.showfig(fig) pyleo.closefig(fig) fig, ax = psd_LS_freq.plot( title='PSD using Lomb-Scargle method with differnt frequency vectors', label='freq=np.linspace(1/20, 1/0.2, 51)', marker='o') psd_ls.plot(ax=ax, label='freq_method="log"', marker='o') @savefig spec_ls_freq.png pyleo.showfig(fig) pyleo.closefig(fig) You may notice the differences in the PSD curves regarding smoothness and the locations of the analyzed period points. For other method-specific arguments, please look up the specific methods in the "See also" section. - WWZ .. ipython:: python :okwarning: :okexcept: psd_wwz = ts_std.spectral(method='wwz') # wwz is the default method psd_wwz_signif = psd_wwz.signif_test(number=1) # significance test; for real work, should use number=200 or even larger @savefig spec_wwz.png fig, ax = psd_wwz_signif.plot(title='PSD using WWZ method') pyleo.closefig(fig) We may take advantage of a pre-calculated scalogram using WWZ to accelerate the spectral analysis (although note that the default parameters for spectral and wavelet analysis using WWZ are different): .. 
ipython:: python :okwarning: :okexcept: scal_wwz = ts_std.wavelet(method='wwz') # wwz is the default method psd_wwz_fast = ts_std.spectral(method='wwz', scalogram=scal_wwz) @savefig spec_wwz_fast.png fig, ax = psd_wwz_fast.plot(title='PSD using WWZ method w/ pre-calculated scalogram') pyleo.closefig(fig) - Periodogram .. ipython:: python :okwarning: :okexcept: ts_interp = ts_std.interp() psd_perio = ts_interp.spectral(method='periodogram') psd_perio_signif = psd_perio.signif_test(number=20) #in practice, need more AR1 simulations @savefig spec_perio.png fig, ax = psd_perio_signif.plot(title='PSD using Periodogram method') pyleo.closefig(fig) - Welch .. ipython:: python :okwarning: :okexcept: ts_interp = ts_std.interp() psd_welch = ts_interp.spectral(method='welch') psd_welch_signif = psd_welch.signif_test(number=20) #in practice, need more AR1 simulations @savefig spec_welch.png fig, ax = psd_welch_signif.plot(title='PSD using Welch method') pyleo.closefig(fig) - MTM .. ipython:: python :okwarning: :okexcept: ts_interp = ts_std.interp() psd_mtm = ts_interp.spectral(method='mtm') psd_mtm_signif = psd_mtm.signif_test(number=20) #in practice, need more AR1 simulations @savefig spec_mtm.png fig, ax = psd_mtm_signif.plot(title='PSD using MTM method') pyleo.closefig(fig) ''' if not verbose: warnings.simplefilter('ignore') settings = {} if settings is None else settings.copy() spec_func = { 'wwz': specutils.wwz_psd, 'mtm': specutils.mtm, 'lomb_scargle': specutils.lomb_scargle, 'welch': specutils.welch, 'periodogram': specutils.periodogram } args = {} freq_kwargs = {} if freq_kwargs is None else freq_kwargs.copy() freq = waveutils.make_freq_vector(self.time, method=freq_method, **freq_kwargs) args['wwz'] = {'freq': freq} args['mtm'] = {} args['lomb_scargle'] = {'freq': freq} args['welch'] = {} args['periodogram'] = {} args[method].update(settings) if method == 'wwz' and scalogram is not None: args['wwz'].update( { 'wwa': scalogram.amplitude, 'wwz_Neffs': 
scalogram.wwz_Neffs, 'wwz_freq': scalogram.frequency, } ) spec_res = spec_func[method](self.value, self.time, **args[method]) if type(spec_res) is dict: spec_res = dict2namedtuple(spec_res) if label is None: label = self.label if method == 'wwz' and scalogram is not None: args['wwz'].pop('wwa') args['wwz'].pop('wwz_Neffs') args['wwz'].pop('wwz_freq') psd = PSD( frequency=spec_res.freq, amplitude=spec_res.psd, label=label, timeseries=self, spec_method=method, spec_args=args[method] ) return psd def wavelet(self, method='wwz', settings=None, freq_method='log', ntau=None, freq_kwargs=None, verbose=False): ''' Perform wavelet analysis on the timeseries cwt wavelets documented on https://pywavelets.readthedocs.io/en/latest/ref/cwt.html Parameters ---------- method : {wwz, cwt} Whether to use the wwz method for unevenly spaced timeseries or traditional cwt (from pywavelets) freq_method : str {'log', 'scale', 'nfft', 'lomb_scargle', 'welch'} freq_kwargs : dict Arguments for frequency vector ntau : int The length of the time shift points that determins the temporal resolution of the result. If None, it will be either the length of the input time axis, or at most 50. settings : dict Arguments for the specific spectral method verbose : bool If True, will print warning messages if there is any Returns ------- scal : Series.Scalogram See also -------- pyleoclim.utils.wavelet.wwz : wwz function pyleoclim.utils.wavelet.cwt : cwt function pyleoclim.utils.wavelet.make_freq_vector : Functions to create the frequency vector pyleoclim.utils.tsutils.detrend : Detrending function pyleoclim.core.ui.Scalogram : Scalogram object pyleoclim.core.ui.MultipleScalogram : Multiple Scalogram object Examples -------- Wavelet analysis on the SOI record. .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1) time = data.iloc[:,1] value = data.iloc[:,2] ts = pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI') # WWZ scal = ts.wavelet() scal_signif = scal.signif_test(number=1) # for real work, should use number=200 or even larger @savefig spec_mtm.png fig, ax = scal_signif.plot() pyleo.closefig(fig) ''' if not verbose: warnings.simplefilter('ignore') settings = {} if settings is None else settings.copy() wave_func = { 'wwz': waveutils.wwz # 'cwt': waveutils.cwt, } if method == 'cwt' and 'freq' in settings.keys(): scales=1/np.array(settings['freq']) settings.update({'scales':scales}) del settings['freq'] freq_kwargs = {} if freq_kwargs is None else freq_kwargs.copy() freq = waveutils.make_freq_vector(self.time, method=freq_method, **freq_kwargs) args = {} if ntau is None: ntau = np.min([np.size(self.time), 50]) tau = np.linspace(np.min(self.time), np.max(self.time), ntau) args['wwz'] = {'tau': tau, 'freq': freq} args['cwt'] = {'wavelet' : 'morl', 'scales':1/freq} args[method].update(settings) wave_res = wave_func[method](self.value, self.time, **args[method]) if method == 'wwz': wwz_Neffs = wave_res.Neffs else: wwz_Neffs = None scal = Scalogram( frequency=wave_res.freq, time=wave_res.time, amplitude=wave_res.amplitude, coi=wave_res.coi, label=self.label, timeseries=self, wave_method=method, freq_method=freq_method, freq_kwargs=freq_kwargs, wave_args=args[method], wwz_Neffs=wwz_Neffs, ) return scal def wavelet_coherence(self, target_series, method='wwz', settings=None, freq_method='log', ntau=None, tau=None, freq_kwargs=None, verbose=False): ''' Perform wavelet coherence analysis with the target timeseries Parameters ---------- target_series : pyleoclim.Series A pyleoclim Series object on which to perform the 
coherence analysis method : {'wwz'} freq_method : str {'log','scale', 'nfft', 'lomb_scargle', 'welch'} freq_kwargs : dict Arguments for frequency vector tau : array The time shift points that determins the temporal resolution of the result. If None, it will be calculated using ntau. ntau : int The length of the time shift points that determins the temporal resolution of the result. If None, it will be either the length of the input time axis, or at most 50. settings : dict Arguments for the specific spectral method verbose : bool If True, will print warning messages if there is any Returns ------- coh : pyleoclim.Coherence See also -------- pyleoclim.utils.wavelet.xwt : Cross-wavelet analysis based on WWZ method pyleoclim.utils.wavelet.make_freq_vector : Functions to create the frequency vector pyleoclim.utils.tsutils.detrend : Detrending function pyleoclim.core.ui.Coherence : Coherence object Examples -------- Wavelet coherence with the default arguments: .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/wtc_test_data_nino.csv') time = data['t'].values air = data['air'].values nino = data['nino'].values ts_air = pyleo.Series(time=time, value=air, time_name='Year (CE)') ts_nino = pyleo.Series(time=time, value=nino, time_name='Year (CE)') # without any arguments, the `tau` will be determined automatically coh = ts_air.wavelet_coherence(ts_nino) @savefig coh.png fig, ax = coh.plot() pyleo.closefig() We may specify `ntau` to adjust the temporal resolution of the scalogram, which will affect the time consumption of calculation and the result itself: .. ipython:: python :okwarning: :okexcept: coh_ntau = ts_air.wavelet_coherence(ts_nino, ntau=30) @savefig coh_ntau.png fig, ax = coh_ntau.plot() pyleo.closefig() We may also specify the `tau` vector explicitly: .. 
ipython:: python :okwarning: :okexcept: coh_tau = ts_air.wavelet_coherence(ts_nino, tau=np.arange(1880, 2001)) @savefig coh_tau.png fig, ax = coh_tau.plot() pyleo.closefig() ''' if not verbose: warnings.simplefilter('ignore') settings = {} if settings is None else settings.copy() xwc_func = { 'wwz': waveutils.xwc, } freq_kwargs = {} if freq_kwargs is None else freq_kwargs.copy() freq = waveutils.make_freq_vector(self.time, method=freq_method, **freq_kwargs) t1 = np.copy(self.time) t2 = np.copy(target_series.time) dt1 = np.median(np.diff(t1)) dt2 = np.median(np.diff(t2)) overlap = np.arange(np.max([t1[0], t2[0]]), np.min([t1[-1], t2[-1]]), np.max([dt1, dt2])) if ntau is None: ntau = np.min([np.size(overlap), 50]) if tau is None: tau = np.linspace(np.min(overlap), np.max(overlap), ntau) args = {} args['wwz'] = {'tau': tau, 'freq': freq, 'verbose': verbose} args[method].update(settings) xwc_res = xwc_func[method](self.value, self.time, target_series.value, target_series.time, **args[method]) coh = Coherence( frequency=xwc_res.freq, time=xwc_res.time, coherence=xwc_res.xw_coherence, phase=xwc_res.xw_phase, coi=xwc_res.coi, timeseries1=self, timeseries2=target_series, freq_method=freq_method, freq_kwargs=freq_kwargs, ) return coh def correlation(self, target_series, timespan=None, alpha=0.05, settings=None, common_time_kwargs=None, seed=None): ''' Estimates the Pearson's correlation and associated significance between two non IID time series The significance of the correlation is assessed using one of the following methods: 1) 'ttest': T-test adjusted for effective sample size. 2) 'isopersistent': AR(1) modeling of x and y. 3) 'isospectral': phase randomization of original inputs. (default) The T-test is a parametric test, hence computationally cheap but can only be performed in ideal circumstances. The others are non-parametric, but their computational requirements scale with the number of simulations. 
The choise of significance test and associated number of Monte-Carlo simulations are passed through the settings parameter. Parameters ---------- target_series : pyleoclim.Series A pyleoclim Series object timespan : tuple The time interval over which to perform the calculation alpha : float The significance level (default: 0.05) settings : dict Parameters for the correlation function, including: nsim : int the number of simulations (default: 1000) method : str, {'ttest','isopersistent','isospectral' (default)} method for significance testing common_time_kwargs : dict Parameters for the method `MultipleSeries.common_time()`. Will use interpolation by default. seed : float or int random seed for isopersistent and isospectral methods Returns ------- corr : pyleoclim.ui.Corr the result object, containing - r : float correlation coefficient - p : float the p-value - signif : bool true if significant; false otherwise Note that signif = True if and only if p <= alpha. - alpha : float the significance level See also -------- pyleoclim.utils.correlation.corr_sig : Correlation function Examples -------- Correlation between the Nino3.4 index and the Deasonalized All Indian Rainfall Index .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/wtc_test_data_nino.csv') t = data.iloc[:, 0] air = data.iloc[:, 1] nino = data.iloc[:, 2] ts_nino = pyleo.Series(time=t, value=nino) ts_air = pyleo.Series(time=t, value=air) # with `nsim=20` and default `method='isospectral'` # set an arbitrary randome seed to fix the result corr_res = ts_nino.correlation(ts_air, settings={'nsim': 20}, seed=2333) print(corr_res) # using a simple t-test # set an arbitrary randome seed to fix the result corr_res = ts_nino.correlation(ts_air, settings={'nsim': 20, 'method': 'ttest'}, seed=2333) print(corr_res) # using the method "isopersistent" # set an arbitrary random seed to fix the result corr_res = ts_nino.correlation(ts_air, settings={'nsim': 20, 'method': 'isopersistent'}, seed=2333) print(corr_res) ''' settings = {} if settings is None else settings.copy() corr_args = {'alpha': alpha} corr_args.update(settings) ms = MultipleSeries([self, target_series]) if list(self.time) != list(target_series.time): common_time_kwargs = {} if common_time_kwargs is None else common_time_kwargs.copy() ct_args = {'method': 'interp'} ct_args.update(common_time_kwargs) ms = ms.common_time(**ct_args) if timespan is None: value1 = ms.series_list[0].value value2 = ms.series_list[1].value else: value1 = ms.series_list[0].slice(timespan).value value2 = ms.series_list[1].slice(timespan).value if seed is not None: np.random.seed(seed) corr_res = corrutils.corr_sig(value1, value2, **corr_args) signif = True if corr_res['signif'] == 1 else False corr = Corr(corr_res['r'], corr_res['p'], signif, alpha) return corr def causality(self, target_series, method='liang', settings=None): ''' Perform causality analysis with the target timeseries The timeseries are first sorted in ascending order. 
Parameters ---------- target_series : pyleoclim.Series A pyleoclim Series object on which to compute causality method : {'liang', 'granger'} The causality method to use. settings : dict Parameters associated with the causality methods. Note that each method has different parameters. See individual methods for details Returns ------- res : dict Dictionary containing the results of the the causality analysis. See indivudal methods for details See also -------- pyleoclim.utils.causality.liang_causality : Liang causality pyleoclim.utils.causality.granger_causality : Granger causality Examples -------- Liang causality .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/wtc_test_data_nino.csv') t=data.iloc[:,0] air=data.iloc[:,1] nino=data.iloc[:,2] ts_nino=pyleo.Series(time=t,value=nino) ts_air=pyleo.Series(time=t,value=air) # plot the two timeseries @savefig ts_nino.png fig, ax = ts_nino.plot(title='NINO3 -- SST Anomalies') pyleo.closefig(fig) @savefig ts_air.png fig, ax = ts_air.plot(title='Deasonalized All Indian Rainfall Index') pyleo.closefig(fig) # we use the specific params below in ts_nino.causality() just to make the example less heavier; # please drop the `settings` for real work caus_res = ts_nino.causality(ts_air, settings={'nsim': 2, 'signif_test': 'isopersist'}) print(caus_res) Granger causality .. 
ipython:: python :okwarning: :okexcept: caus_res = ts_nino.causality(ts_air, method='granger') print(caus_res) ''' # Sort both timeseries sorted_self = self.sort(verbose=True) sorted_target = target_series.sort(verbose=True) settings = {} if settings is None else settings.copy() spec_func={ 'liang':causalutils.liang_causality, 'granger':causalutils.granger_causality} args = {} args['liang'] = {} args['granger'] = {} args[method].update(settings) causal_res = spec_func[method](sorted_self.value, sorted_target.value, **args[method]) return causal_res def surrogates(self, method='ar1', number=1, length=None, seed=None, settings=None): ''' Generate surrogates with increasing time axis Parameters ---------- method : {ar1} Uses an AR1 model to generate surrogates of the timeseries number : int The number of surrogates to generate length : int Lenght of the series seed : int Control seed option for reproducibility settings : dict Parameters for surogate generator. See individual methods for details. 
Returns ------- surr : pyleoclim SurrogateSeries See also -------- pyleoclim.utils.tsmodel.ar1_sim : AR1 simulator ''' settings = {} if settings is None else settings.copy() surrogate_func = { 'ar1': tsmodel.ar1_sim, } args = {} args['ar1'] = {'t': self.time} args[method].update(settings) if seed is not None: np.random.seed(seed) surr_res = surrogate_func[method](self.value, number, **args[method]) if len(np.shape(surr_res)) == 1: surr_res = surr_res[:, np.newaxis] s_list = [] for s in surr_res.T: s_tmp = Series(time=self.time, value=s, time_name=self.time_name, time_unit=self.time_unit, value_name=self.value_name, value_unit=self.value_unit) s_list.append(s_tmp) surr = SurrogateSeries(series_list=s_list, surrogate_method=method, surrogate_args=args[method]) return surr def outliers(self, auto=True, remove=True, fig_outliers=True,fig_knee=True, plot_outliers_kwargs=None,plot_knee_kwargs=None,figsize=[10,4], saveknee_settings=None,saveoutliers_settings=None, mute=False): ''' Detects outliers in a timeseries and removes if specified Parameters ---------- auto : boolean True by default, detects knee in the plot automatically remove : boolean True by default, removes all outlier points if detected fig_knee : boolean True by default, plots knee plot if true fig_outliers : boolean True by degault, plots outliers if true save_knee : dict default parameters from matplotlib savefig None by default save_outliers : dict default parameters from matplotlib savefig None by default plot_knee_kwargs : dict arguments for the knee plot plot_outliers_kwargs : dict arguments for the outliers plot figsize : list by default [10,4] mute : {True,False} if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) Returns ------- new : Series Time series with outliers removed if they exist See also -------- pyleoclim.utils.tsutils.remove_outliers : remove outliers function pyleoclim.utils.plotting.plot_xy : basic x-y plot 
pyleoclim.utils.plotting.plot_scatter_xy : Scatter plot on top of a line plot ''' new = self.copy() #outlier_indices,fig1,ax1,fig2,ax2 = tsutils.detect_outliers(self.time, self.value, auto=auto, plot_knee=fig_knee,plot_outliers=fig_outliers,\ # figsize=figsize,save_knee=save_knee,save_outliers=save_outliers,plot_outliers_kwargs=plot_outliers_kwargs,plot_knee_kwargs=plot_knee_kwargs) outlier_indices = tsutils.detect_outliers( self.time, self.value, auto=auto, plot_knee=fig_knee,plot_outliers=fig_outliers, figsize=figsize,saveknee_settings=saveknee_settings,saveoutliers_settings=saveoutliers_settings, plot_outliers_kwargs=plot_outliers_kwargs,plot_knee_kwargs=plot_knee_kwargs, mute=mute ) outlier_indices = np.asarray(outlier_indices) if remove == True: new = self.copy() ys = np.delete(self.value, outlier_indices) t = np.delete(self.time, outlier_indices) new.value = ys new.time = t return new def interp(self, method='linear', **kwargs): '''Interpolate a Series object onto a new time axis Parameters ---------- method : {‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’} where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’. kwargs : Arguments specific to each interpolation function. See pyleoclim.utils.tsutils.interp for details Returns ------- new : pyleoclim.Series An interpolated Series object See also -------- pyleoclim.utils.tsutils.interp : interpolation function ''' new = self.copy() ti, vi = tsutils.interp(self.time,self.value,interp_type=method,**kwargs) new.time = ti new.value = vi return new def gkernel(self, step_type='median', **kwargs): ''' Coarse-grain a Series object via a Gaussian kernel. 
Parameters ---------- step_type : str type of timestep: 'mean', 'median', or 'max' of the time increments kwargs : Arguments for kernel function. See pyleoclim.utils.tsutils.gkernel for details Returns ------- new : pyleoclim.Series The coarse-grained Series object See also -------- pyleoclim.utils.tsutils.gkernel : application of a Gaussian kernel ''' new=self.copy() ti, vi = tsutils.gkernel(self.time, self.value, **kwargs) # apply kernel new.time = ti new.value = vi return new def bin(self,**kwargs): '''Bin values in a time series Parameters ---------- kwargs : Arguments for binning function. See pyleoclim.utils.tsutils.bin for details Returns ------- new : pyleoclim.Series An binned Series object See also -------- pyleoclim.utils.tsutils.bin : bin the time series into evenly-spaced bins ''' new=self.copy() res_dict = tsutils.bin(self.time,self.value,**kwargs) new.time = res_dict['bins'] new.value = res_dict['binned_values'] return new class PSD: '''PSD object obtained from spectral analysis. 
See examples in pyleoclim.core.ui.Series.spectral to see how to create and manipulate these objects See also -------- pyleoclim.core.ui.Series.spectral : spectral analysis ''' def __init__(self, frequency, amplitude, label=None, timeseries=None, plot_kwargs=None, spec_method=None, spec_args=None, signif_qs=None, signif_method=None, period_unit=None, beta_est_res=None): self.frequency = np.array(frequency) self.amplitude = np.array(amplitude) self.label = label self.timeseries = timeseries self.spec_method = spec_method self.spec_args = spec_args self.signif_qs = signif_qs self.signif_method = signif_method self.plot_kwargs = {} if plot_kwargs is None else plot_kwargs.copy() self.beta_est_res = beta_est_res if period_unit is not None: self.period_unit = period_unit elif timeseries is not None: self.period_unit = infer_period_unit_from_time_unit(timeseries.time_unit) else: self.period_unit = None def copy(self): '''Copy object ''' return deepcopy(self) def __str__(self): table = { 'Frequency': self.frequency, 'Amplitude': self.amplitude, } msg = print(tabulate(table, headers='keys')) return f'Length: {np.size(self.frequency)}' def signif_test(self, number=None, method='ar1', seed=None, qs=[0.95], settings=None, scalogram = None): ''' Parameters ---------- number : int, optional Number of surrogate series to generate for significance testing. The default is None. method : {ar1}, optional Method to generate surrogates. The default is 'ar1'. seed : int, optional Option to set the seed for reproducibility. The default is None. qs : list, optional Singificance levels to return. The default is [0.95]. settings : dict, optional Parameters. The default is None. scalogram : Pyleoclim Scalogram object, optional Scalogram containing signif_scals exported during significance testing of scalogram. 
If number is None and signif_scals are present, will use length of scalogram list as number of significance tests Returns ------- new : pyleoclim.PSD New PSD object with appropriate significance test Examples -------- If significance tests from a comparable scalogram have been saved, they can be passed here to speed up the generation of noise realizations for significance testing .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1) series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD') #Setting export_scal to True saves the noise realizations generated during significance testing for future use scalogram = series.wavelet().signif_test(number=2,export_scal=True) #The psd can be calculated by using the previously generated scalogram psd = series.spectral(scalogram=scalogram) #The same scalogram can then be passed to do significance testing. Pyleoclim will dig through the scalogram to find the saved noise realizations and reuse them flexibly. 
fig, ax = psd.signif_test(scalogram=scalogram).plot() pyleo.showfig(fig) pyleo.closefig(fig) ''' signif_scals = None if scalogram: try: signif_scals = scalogram.signif_scals except: return ValueError('Could not find signif_scals in passed object, make sure this is a scalogram with signif_scals that were saved during significance testing') if number is None and signif_scals: number = len(signif_scals.scalogram_list) elif number is None and signif_scals is None: number = 200 elif number == 0: return self new = self.copy() surr = self.timeseries.surrogates( number=number, seed=seed, method=method, settings=settings ) if signif_scals: surr_psd = surr.spectral( method=self.spec_method, settings=self.spec_args, scalogram_list=signif_scals ) else: surr_psd = surr.spectral(method=self.spec_method, settings=self.spec_args) new.signif_qs = surr_psd.quantiles(qs=qs) new.signif_method = method return new def beta_est(self, fmin=None, fmax=None, logf_binning_step='max', verbose=False): ''' Estimate the scaling factor beta of the PSD in a log-log space Parameters ---------- fmin : float the minimum frequency edge for beta estimation; the default is the minimum of the frequency vector of the PSD obj fmax : float the maximum frequency edge for beta estimation; the default is the maximum of the frequency vector of the PSD obj logf_binning_step : str, {'max', 'first'} if 'max', then the maximum spacing of log(f) will be used as the binning step if 'first', then the 1st spacing of log(f) will be used as the binning step verbose : bool If True, will print warning messages if there is any Returns ------- new : pyleoclim.PSD New PSD object with the estimated scaling slope information, which is stored as a dictionary that includes: - beta: the scaling factor - std_err: the one standard deviation error of the scaling factor - f_binned: the binned frequency series, used as X for linear regression - psd_binned: the binned PSD series, used as Y for linear regression - Y_reg: the predicted Y 
from linear regression, used with f_binned for the slope curve plotting Examples -------- .. ipython:: python :okwarning: :okexcept: # generate colored noise with default scaling slope 'alpha' equals to 1 ts = pyleo.gen_ts(model='colored_noise') ts.label = 'colored noise' psd = ts.spectral() # estimate the scaling slope psd_beta = psd.beta_est(fmin=1/50, fmax=1/2) @savefig color_noise_beta.png fig, ax = psd_beta.plot() pyleo.closefig(fig) ''' if fmin is None: fmin = np.min(self.frequency) if fmax is None: fmax = np.max(self.frequency) res = waveutils.beta_estimation(self.amplitude, self.frequency, fmin=fmin, fmax=fmax, logf_binning_step=logf_binning_step, verbose=verbose) res_dict = { 'beta': res.beta, 'std_err': res.std_err, 'f_binned': res.f_binned, 'psd_binned': res.psd_binned, 'Y_reg': res.Y_reg, } new = self.copy() new.beta_est_res = res_dict return new def plot(self, in_loglog=True, in_period=True, label=None, xlabel=None, ylabel='PSD', title=None, marker=None, markersize=None, color=None, linestyle=None, linewidth=None, transpose=False, xlim=None, ylim=None, figsize=[10, 4], savefig_settings=None, ax=None, mute=False, legend=True, lgd_kwargs=None, xticks=None, yticks=None, alpha=None, zorder=None, plot_kwargs=None, signif_clr='red', signif_linestyles=['--', '-.', ':'], signif_linewidth=1, plot_beta=True, beta_kwargs=None): '''Plots the PSD estimates and signif level if included Parameters ---------- in_loglog : bool, optional Plot on loglog axis. The default is True. in_period : bool, optional Plot the x-axis as periodicity rather than frequency. The default is True. label : str, optional label for the series. The default is None. xlabel : str, optional Label for the x-axis. The default is None. Will guess based on Series ylabel : str, optional Label for the y-axis. The default is 'PSD'. title : str, optional Plot title. The default is None. marker : str, optional marker to use. The default is None. markersize : int, optional size of the marker. 
The default is None. color : str, optional Line color. The default is None. linestyle : str, optional linestyle. The default is None. linewidth : float, optional Width of the line. The default is None. transpose : bool, optional Plot periodicity on y-. The default is False. xlim : list, optional x-axis limits. The default is None. ylim : list, optional y-axis limits. The default is None. figsize : list, optional Figure size. The default is [10, 4]. savefig_settings : dict, optional save settings options. The default is None. the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} ax : ax, optional The matplotlib.Axes object onto which to return the plot. The default is None. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax The default is False. (going to be deprecated) legend : bool, optional whether to plot the legend. The default is True. lgd_kwargs : dict, optional Arguments for the legend. The default is None. xticks : list, optional xticks to use. The default is None. yticks : list, optional yticks to use. The default is None. alpha : float, optional Transparency setting. The default is None. zorder : int, optional Order for the plot. The default is None. plot_kwargs : dict, optional Other plotting argument. The default is None. signif_clr : str, optional Color for the significance line. The default is 'red'. signif_linestyles : list of str, optional Linestyles for significance. The default is ['--', '-.', ':']. signif_linewidth : float, optional width of the significance line. The default is 1. 
plot_beta : boll, optional If True and self.beta_est_res is not None, then the scaling slope line will be plotted beta_kwargs : dict, optional The visualization keyword arguments for the scaling slope Returns ------- fig, ax See also -------- pyleoclim.core.ui.Series.spectral : spectral analysis ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() plot_kwargs = self.plot_kwargs if plot_kwargs is None else plot_kwargs.copy() beta_kwargs = {} if beta_kwargs is None else beta_kwargs.copy() lgd_kwargs = {} if lgd_kwargs is None else lgd_kwargs.copy() if label is None: if plot_beta and self.beta_est_res is not None: label = fr'{self.label} ($\beta=${self.beta_est_res["beta"]:.2f}$\pm${self.beta_est_res["std_err"]:.2f})' else: label = self.label if label is not None: plot_kwargs.update({'label': label}) if marker is not None: plot_kwargs.update({'marker': marker}) if markersize is not None: plot_kwargs.update({'markersize': markersize}) if color is not None: plot_kwargs.update({'color': color}) if linestyle is not None: plot_kwargs.update({'linestyle': linestyle}) if linewidth is not None: plot_kwargs.update({'linewidth': linewidth}) if alpha is not None: plot_kwargs.update({'alpha': alpha}) if zorder is not None: plot_kwargs.update({'zorder': zorder}) if ax is None: fig, ax = plt.subplots(figsize=figsize) if in_period: idx = np.argwhere(self.frequency==0) x_axis = 1/np.delete(self.frequency, idx) y_axis = np.delete(self.amplitude, idx) if xlabel is None: xlabel = f'Period [{self.period_unit}]' if self.period_unit is not None else 'Period' if xticks is None: xticks_default = np.array([0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 1e4, 2e4, 5e4, 1e5, 2e5, 5e5, 1e6]) mask = (xticks_default >= np.nanmin(x_axis)) & (xticks_default <= np.nanmax(x_axis)) xticks = xticks_default[mask] if xlim is None: xlim = [np.max(xticks), np.min(xticks)] else: idx = np.argwhere(self.frequency==0) x_axis = np.delete(self.frequency, idx) 
y_axis = np.delete(self.amplitude, idx) if xlabel is None: xlabel = f'Frequency [1/{self.period_unit}]' if self.period_unit is not None else 'Frequency' if xlim is None: xlim = ax.get_xlim() xlim = [np.min(xlim), np.max(xlim)] if transpose: x_axis, y_axis = y_axis, x_axis xlim, ylim = ylim, xlim xticks, yticks = yticks, xticks xlabel, ylabel = ylabel, xlabel ax.set_ylim(ylim[::-1]) else: ax.set_xlim(xlim) ax.plot(x_axis, y_axis, **plot_kwargs) # plot significance levels if self.signif_qs is not None: signif_method_label = { 'ar1': 'AR(1)', } nqs = np.size(self.signif_qs.psd_list) for i, q in enumerate(self.signif_qs.psd_list): idx = np.argwhere(q.frequency==0) signif_x_axis = 1/np.delete(q.frequency, idx) if in_period else np.delete(q.frequency, idx) signif_y_axis = np.delete(q.amplitude, idx) if transpose: signif_x_axis, signif_y_axis = signif_y_axis, signif_x_axis ax.plot( signif_x_axis, signif_y_axis, label=f'{signif_method_label[self.signif_method]}, {q.label} threshold', color=signif_clr, linestyle=signif_linestyles[i%3], linewidth=signif_linewidth, ) if in_loglog: ax.set_xscale('log') ax.set_yscale('log') if xticks is not None: ax.set_xticks(xticks) ax.xaxis.set_major_formatter(ScalarFormatter()) ax.xaxis.set_major_formatter(FormatStrFormatter('%g')) if yticks is not None: ax.set_yticks(yticks) ax.yaxis.set_major_formatter(ScalarFormatter()) ax.yaxis.set_major_formatter(FormatStrFormatter('%g')) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) if plot_beta and self.beta_est_res is not None: plot_beta_kwargs = { 'linestyle': '--', 'color': 'k', 'linewidth': 1, 'zorder': 99, } plot_beta_kwargs.update(beta_kwargs) beta_x_axis = 1/self.beta_est_res['f_binned'] beta_y_axis = self.beta_est_res['Y_reg'] if transpose: beta_x_axis, beta_y_axis = beta_y_axis, beta_x_axis ax.plot(beta_x_axis, beta_y_axis , **plot_beta_kwargs) if legend: lgd_args = {'frameon': False} lgd_args.update(lgd_kwargs) ax.legend(**lgd_args) if title is not None: ax.set_title(title) if xlim is not 
None: ax.set_xlim(xlim) if ylim is not None: ax.set_ylim(ylim) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax class Scalogram: def __init__(self, frequency, time, amplitude, coi=None, label=None, Neff=3, wwz_Neffs=None, timeseries=None, wave_method=None, wave_args=None, signif_qs=None, signif_method=None, freq_method=None, freq_kwargs=None, period_unit=None, time_label=None, signif_scals=None): ''' Parameters ---------- frequency : array the frequency axis time : array the time axis amplitude : array the amplitude at each (frequency, time) point; note the dimension is assumed to be (frequency, time) coi : array Cone of influence label : str Label for the Series Neff : int the threshold of the number of effective samples wwz_Neffs : array the matrix of effective number of points in the time-scale coordinates obtained from wwz timeseries : pyleoclim.Series A copy of the timeseries for which the scalogram was obtained wave_method: str The method used to obtain the scalogram wave_args: dict The parameters values of the wavelet method signif_qs : dict The significance limits signif_method: str The method used to obtain the significance level freq_method: str The method used to obtain the frequency vector freq_kwargs: dict Arguments for the frequency vector period_unit: str Units for the period axis time_label: str Label for the time axis signif_scals: pyleoclim.MultipleScalogram A list of the scalogram from the AR1 MC significance testing. Useful when obtaining a PSD. 
''' self.frequency = np.array(frequency) self.time = np.array(time) self.amplitude = np.array(amplitude) if coi is not None: self.coi = np.array(coi) else: self.coi = waveutils.make_coi(self.time, Neff=Neff) self.label = label self.timeseries = timeseries self.wave_method = wave_method self.wave_args = wave_args self.signif_qs = signif_qs self.signif_method = signif_method self.freq_method = freq_method self.freq_kwargs = freq_kwargs self.signif_scals = signif_scals #if wave_method == 'wwz': self.wwz_Neffs = wwz_Neffs if period_unit is not None: self.period_unit = period_unit elif timeseries is not None: self.period_unit = infer_period_unit_from_time_unit(timeseries.time_unit) else: self.period_unit = None if time_label is not None: self.time_label = time_label elif timeseries is not None: if timeseries.time_unit is not None: self.time_label = f'{timeseries.time_name} [{timeseries.time_unit}]' else: self.time_label = f'{timeseries.time_name}' else: self.time_label = None def copy(self): '''Copy object ''' return deepcopy(self) def __str__(self): table = { 'Frequency': self.frequency, 'Time': self.time, 'Amplitude': self.amplitude, } msg = print(tabulate(table, headers='keys')) return f'Dimension: {np.size(self.frequency)} x {np.size(self.time)}' def plot(self, in_period=True, xlabel=None, ylabel=None, title=None, ylim=None, xlim=None, yticks=None, figsize=[10, 8], mute=False, signif_clr='white', signif_linestyles='-', signif_linewidths=1, contourf_style={}, cbar_style={}, savefig_settings={}, ax=None): '''Plot the scalogram Parameters ---------- in_period : bool, optional Plot the in period instead of frequency space. The default is True. xlabel : str, optional Label for the x-axis. The default is None. ylabel : str, optional Label for the y-axis. The default is None. title : str, optional Title for the figure. The default is None. ylim : list, optional Limits for the y-axis. The default is None. xlim : list, optional Limits for the x-axis. The default is None. 
yticks : list, optional yticks label. The default is None. figsize : list, optional Figure size The default is [10, 8]. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax The default is False. (going to be deprecated) signif_clr : str, optional Color of the singificance line. The default is 'white'. signif_linestyles : str, optional Linestyle of the significance line. The default is '-'. signif_linewidths : float, optional Width for the significance line. The default is 1. contourf_style : dict, optional Arguments for the contour plot. The default is {}. cbar_style : dict, optional Arguments for the colarbar. The default is {}. savefig_settings : dict, optional saving options for the figure. The default is {}. ax : ax, optional Matplotlib Axis on which to return the figure. The default is None. Returns ------- fig, ax See also -------- pyleoclim.core.ui.Series.wavelet : Wavelet analysis ''' contourf_args = {'cmap': 'magma', 'origin': 'lower', 'levels': 11} contourf_args.update(contourf_style) if ax is None: fig, ax = plt.subplots(figsize=figsize) if in_period: y_axis = 1/self.frequency if ylabel is None: ylabel = f'Period [{self.period_unit}]' if self.period_unit is not None else 'Period' if yticks is None: yticks_default = np.array([0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 1e4, 2e4, 5e4, 1e5, 2e5, 5e5, 1e6]) mask = (yticks_default >= np.min(y_axis)) & (yticks_default <= np.max(y_axis)) yticks = yticks_default[mask] else: y_axis = self.frequency if ylabel is None: ylabel = f'Frequency [1/{self.period_unit}]' if self.period_unit is not None else 'Frequency' cont = ax.contourf(self.time, y_axis, self.amplitude.T, **contourf_args) ax.set_yscale('log') # plot colorbar cbar_args = {'drawedges': False, 'orientation': 'vertical', 'fraction': 0.15, 'pad': 0.05} cbar_args.update(cbar_style) cb = plt.colorbar(cont, **cbar_args) # plot cone of influence if self.coi is not 
None: ax.plot(self.time, self.coi, 'k--') if yticks is not None: ax.set_yticks(yticks) ax.yaxis.set_major_formatter(ScalarFormatter()) ax.yaxis.set_major_formatter(FormatStrFormatter('%g')) if title is not None: ax.set_title(title) if ylim is None: ylim = [np.min(y_axis), np.min([np.max(y_axis), np.max(self.coi)])] ax.fill_between(self.time, self.coi, np.max(self.coi), color='white', alpha=0.5) # plot significance levels if self.signif_qs is not None: signif_method_label = { 'ar1': 'AR(1)', } signif_scal = self.signif_qs.scalogram_list[0] signif_boundary = self.amplitude.T / signif_scal.amplitude.T ax.contour( self.time, y_axis, signif_boundary, [-99, 1], colors=signif_clr, linestyles=signif_linestyles, linewidths=signif_linewidths, ) if xlabel is None: xlabel = self.time_label if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) if xlim is not None: ax.set_xlim(xlim) ax.set_ylim(ylim) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax def signif_test(self, number=None, method='ar1', seed=None, qs=[0.95], settings=None, export_scal = False): '''Significance test for wavelet analysis Parameters ---------- number : int, optional Number of surrogates to generate for significance analysis. The default is 200. method : {'ar1'}, optional Method to use to generate the surrogates. The default is 'ar1'. seed : int, optional Set the seed for the random number generator. Useful for reproducibility The default is None. qs : list, optional Significane level to consider. The default is [0.95]. settings : dict, optional Parameters for the model. The default is None. export_scal : bool Whether or not to export the scalograms used in the noise realizations. Note: The scalograms used for wavelet analysis are slightly different than those used for spectral analysis (different decay constant). 
As such, this functionality should be used only to expedite exploratory analysis. Raises ------ ValueError qs should be a list with at least one value. Returns ------- new : pyleoclim.Scalogram A new Scalogram object with the significance level Examples -------- Generating scalogram, running significance tests, and saving the output for future use in generating psd objects or in summary_plot() .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1) series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD') #By setting export_scal to True, the noise realizations used to generate the significance test will be saved. These come in handy for generating summary plots and for running significance tests on spectral objects. scalogram = series.wavelet().signif_test(number=2, export_scal=True) See also -------- pyleoclim.core.ui.Series.wavelet : wavelet analysis ''' if hasattr(self,'signif_scals'): signif_scals = self.signif_scals #Allow for a few different configurations of passed number of signif tests, default behavior is to set number = 200 if number is None and signif_scals is not None: number = len(signif_scals.scalogram_list) elif number is None and signif_scals is None: number = 200 elif number == 0: return self new = self.copy() if signif_scals: scalogram_list = signif_scals.scalogram_list #If signif_scals already in scalogram object are more than those requested for significance testing, use as many of them as required if len(scalogram_list) > number: surr_scal = MultipleScalogram(scalogram_list=scalogram_list[:number]) #If number is the same as the length of signif_scals, just use signif_scals elif len(scalogram_list) == number: surr_scal = signif_scals #If the number is more than the length of signif_scals, reuse what is available and calculate the rest elif 
len(scalogram_list) < number: number -= len(scalogram_list) surr_scal_tmp = [] surr_scal_tmp.extend(scalogram_list) surr = self.timeseries.surrogates( number=number, seed=seed, method=method, settings=settings ) surr_scal_tmp.extend(surr.wavelet(method=self.wave_method, settings=self.wave_args,).scalogram_list) surr_scal = MultipleScalogram(scalogram_list=surr_scal_tmp) else: surr = self.timeseries.surrogates( number=number, seed=seed, method=method, settings=settings ) surr_scal = surr.wavelet(method=self.wave_method, settings=self.wave_args,) if len(qs) > 1: raise ValueError('qs should be a list with size 1!') new.signif_qs = surr_scal.quantiles(qs=qs) new.signif_method = method if export_scal == True: new.signif_scals = surr_scal return new class Coherence: '''Coherence object See also -------- pyleoclim.core.ui.Series.wavelet_coherence : Wavelet coherence ''' def __init__(self, frequency, time, coherence, phase, coi=None, timeseries1=None, timeseries2=None, signif_qs=None, signif_method=None, freq_method=None, freq_kwargs=None, Neff=3, period_unit=None, time_label=None): self.frequency = np.array(frequency) self.time = np.array(time) self.coherence = np.array(coherence) if coi is not None: self.coi = np.array(coi) else: self.coi = waveutils.make_coi(self.time, Neff=Neff) self.phase = np.array(phase) self.timeseries1 = timeseries1 self.timeseries2 = timeseries2 self.signif_qs = signif_qs self.signif_method = signif_method self.freq_method = freq_method self.freq_kwargs = freq_kwargs if period_unit is not None: self.period_unit = period_unit elif timeseries1 is not None: self.period_unit = infer_period_unit_from_time_unit(timeseries1.time_unit) elif timeseries2 is not None: self.period_unit = infer_period_unit_from_time_unit(timeseries2.time_unit) else: self.period_unit = None if time_label is not None: self.time_label = time_label elif timeseries1 is not None: if timeseries1.time_unit is not None: self.time_label = f'{timeseries1.time_name} 
[{timeseries1.time_unit}]' else: self.time_label = f'{timeseries1.time_name}' elif timeseries2 is not None: if timeseries2.time_unit is not None: self.time_label = f'{timeseries2.time_name} [{timeseries2.time_unit}]' else: self.time_label = f'{timeseries2.time_name}' else: self.time_label = None def copy(self): '''Copy object ''' return deepcopy(self) def plot(self, xlabel=None, ylabel=None, title=None, figsize=[10, 8], ylim=None, xlim=None, in_period=True, yticks=None, mute=False, contourf_style={}, phase_style={}, cbar_style={}, savefig_settings={}, ax=None, signif_clr='white', signif_linestyles='-', signif_linewidths=1, under_clr='ivory', over_clr='black', bad_clr='dimgray'): '''Plot the cross-wavelet results Parameters ---------- xlabel : str, optional x-axis label. The default is None. ylabel : str, optional y-axis label. The default is None. title : str, optional Title of the plot. The default is None. figsize : list, optional Figure size. The default is [10, 8]. ylim : list, optional y-axis limits. The default is None. xlim : list, optional x-axis limits. The default is None. in_period : bool, optional Plots periods instead of frequencies The default is True. yticks : list, optional y-ticks label. The default is None. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax The default is False. The default is False. (going to be deprecated) contourf_style : dict, optional Arguments for the contour plot. The default is {}. phase_style : dict, optional Arguments for the phase arrows. The default is {}. 
It includes: - 'pt': the default threshold above which phase arrows will be plotted - 'skip_x': the number of points to skip between phase arrows along the x-axis - 'skip_y': the number of points to skip between phase arrows along the y-axis - 'scale': number of data units per arrow length unit (see matplotlib.pyplot.quiver) - 'width': shaft width in arrow units (see matplotlib.pyplot.quiver) cbar_style : dict, optional Arguments for the color bar. The default is {}. savefig_settings : dict, optional The default is {}. the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} ax : ax, optional Matplotlib axis on which to return the figure. The default is None. signif_clr : str, optional Color of the singificance line. The default is 'white'. signif_linestyles : str, optional Style of the significance line. The default is '-'. signif_linewidths : float, optional Width of the significance line. The default is 1. under_clr : str, optional Color for under 0. The default is 'ivory'. over_clr : str, optional Color for over 1. The default is 'black'. bad_clr : str, optional Color for missing values. The default is 'dimgray'. 
Returns ------- fig, ax See also -------- pyleoclim.core.ui.Series.wavelet_coherence matplotlib.pyplot.quiver ''' if ax is None: fig, ax = plt.subplots(figsize=figsize) # handling NaNs mask_freq = [] for i in range(np.size(self.frequency)): if all(np.isnan(self.coherence[:, i])): mask_freq.append(False) else: mask_freq.append(True) if in_period: y_axis = 1/self.frequency[mask_freq] if ylabel is None: ylabel = f'Period [{self.period_unit}]' if self.period_unit is not None else 'Period' if yticks is None: yticks_default = np.array([0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 1e4, 2e4, 5e4, 1e5, 2e5, 5e5, 1e6]) mask = (yticks_default >= np.min(y_axis)) & (yticks_default <= np.max(y_axis)) yticks = yticks_default[mask] else: y_axis = self.frequency[mask_freq] if ylabel is None: ylabel = f'Frequency [1/{self.period_unit}]' if self.period_unit is not None else 'Frequency' # plot coherence amplitude contourf_args = { 'cmap': 'magma', 'origin': 'lower', 'levels': np.linspace(0, 1, 11), } contourf_args.update(contourf_style) cmap = cm.get_cmap(contourf_args['cmap']) cmap.set_under(under_clr) cmap.set_over(over_clr) cmap.set_bad(bad_clr) contourf_args['cmap'] = cmap cont = ax.contourf(self.time, y_axis, self.coherence[:, mask_freq].T, **contourf_args) # plot significance levels if self.signif_qs is not None: signif_method_label = { 'ar1': 'AR(1)', } signif_coh = self.signif_qs.scalogram_list[0] signif_boundary = self.coherence[:, mask_freq].T / signif_coh.amplitude[:, mask_freq].T ax.contour( self.time, y_axis, signif_boundary, [-99, 1], colors=signif_clr, linestyles=signif_linestyles, linewidths=signif_linewidths, ) # plot colorbar cbar_args = { 'drawedges': False, 'orientation': 'vertical', 'fraction': 0.15, 'pad': 0.05, 'ticks': np.linspace(0, 1, 11) } cbar_args.update(cbar_style) cb = plt.colorbar(cont, **cbar_args) # plot cone of influence ax.set_yscale('log') ax.plot(self.time, self.coi, 'k--') if ylim is None: ylim = [np.min(y_axis), 
np.min([np.max(y_axis), np.max(self.coi)])] ax.fill_between(self.time, self.coi, np.max(self.coi), color='white', alpha=0.5) if yticks is not None: ax.set_yticks(yticks) ax.yaxis.set_major_formatter(ScalarFormatter()) ax.yaxis.set_major_formatter(FormatStrFormatter('%g')) if xlabel is None: xlabel = self.time_label if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) # plot phase skip_x = np.max([int(np.size(self.time)//20), 1]) skip_y = np.max([int(np.size(y_axis)//20), 1]) phase_args = {'pt': 0.5, 'skip_x': skip_x, 'skip_y': skip_y, 'scale': 30, 'width': 0.004} phase_args.update(phase_style) pt = phase_args['pt'] skip_x = phase_args['skip_x'] skip_y = phase_args['skip_y'] scale = phase_args['scale'] width = phase_args['width'] phase = np.copy(self.phase)[:, mask_freq] if self.signif_qs is None: phase[self.coherence[:, mask_freq] < pt] = np.nan else: phase[signif_boundary.T < 1] = np.nan X, Y = np.meshgrid(self.time, 1/self.frequency[mask_freq]) U, V = np.cos(phase).T, np.sin(phase).T ax.quiver(X[::skip_y, ::skip_x], Y[::skip_y, ::skip_x], U[::skip_y, ::skip_x], V[::skip_y, ::skip_x], scale=scale, width=width, zorder=99) ax.set_ylim(ylim) if xlim is not None: ax.set_xlim(xlim) if title is not None: ax.set_title(title) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax def signif_test(self, number=200, method='ar1', seed=None, qs=[0.95], settings=None, mute_pbar=False): '''Significance testing Parameters ---------- number : int, optional Number of surrogate series to create for significance testing. The default is 200. method : {'ar1'}, optional Method through which to generate the surrogate series. The default is 'ar1'. seed : int, optional Fixes the seed for the random number generator. Useful for reproducibility. The default is None. qs : list, optional Significanc level to return. 
The default is [0.95]. settings : dict, optional Parameters for surrogate model. The default is None. mute_pbar : bool, optional Mute the progress bar. The default is False. Returns ------- new : pyleoclim.Coherence Coherence with significance level See also -------- pyleoclim.core.ui.Series.wavelet_coherence : Wavelet coherence ''' if number == 0: return self new = self.copy() surr1 = self.timeseries1.surrogates( number=number, seed=seed, method=method, settings=settings ) surr2 = self.timeseries2.surrogates( number=number, seed=seed, method=method, settings=settings ) cohs = [] for i in tqdm(range(number), desc='Performing wavelet coherence on surrogate pairs', total=number, disable=mute_pbar): coh_tmp = surr1.series_list[i].wavelet_coherence(surr2.series_list[i], settings={'tau': self.time, 'freq': self.frequency}) cohs.append(coh_tmp.coherence) cohs = np.array(cohs) ne, nf, nt = np.shape(cohs) coh_qs = np.ndarray(shape=(np.size(qs), nf, nt)) for i in range(nf): for j in range(nt): coh_qs[:,i,j] = mquantiles(cohs[:,i,j], qs) scal_list = [] for i, amp in enumerate(coh_qs): scal_tmp = Scalogram( frequency=self.frequency, time=self.time, amplitude=amp, coi=self.coi, freq_method=self.freq_method, freq_kwargs=self.freq_kwargs, label=f'{qs[i]*100:g}%', ) scal_list.append(scal_tmp) new.signif_qs = MultipleScalogram(scalogram_list=scal_list) new.signif_method = method return new class MultipleSeries: '''MultipleSeries object. This object handles a collection of the type Series and can be created from a list of such objects. MultipleSeries should be used when the need to run analysis on multiple records arises, such as running principal component analysis. Some of the methods automatically refocus the time axis prior to analysis to ensure that the analysis is run over the same time period. Parameters ---------- series_list : list a list of pyleoclim.Series objects time_unit : str The target time unit for every series in the list. 
If None, then no conversion will be applied; Otherwise, the time unit of every series in the list will be converted to the target. name : str name of the collection of timeseries (e.g. 'PAGES 2k ice cores') Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data = pd.read_csv( 'https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv', skiprows=0, header=1 ) time = data.iloc[:,1] value = data.iloc[:,2] ts1 = pyleo.Series(time=time, value=value, time_unit='years') ts2 = pyleo.Series(time=time, value=value, time_unit='years') ms = pyleo.MultipleSeries([ts1, ts2], name = 'SOI x2') ''' def __init__(self, series_list, time_unit=None, name=None): self.series_list = series_list self.time_unit = time_unit self.name = name if self.time_unit is not None: new_ts_list = [] for ts in self.series_list: new_ts = ts.convert_time_unit(time_unit=self.time_unit) new_ts_list.append(new_ts) self.series_list = new_ts_list def convert_time_unit(self, time_unit='years'): ''' Convert the time unit of the timeseries Parameters ---------- time_unit : str the target time unit, possible input: { 'year', 'years', 'yr', 'yrs', 'y BP', 'yr BP', 'yrs BP', 'year BP', 'years BP', 'ky BP', 'kyr BP', 'kyrs BP', 'ka BP', 'ka', 'my BP', 'myr BP', 'myrs BP', 'ma BP', 'ma', } Examples -------- .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo import pandas as pd data = pd.read_csv( 'https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv', skiprows=0, header=1 ) time = data.iloc[:,1] value = data.iloc[:,2] ts1 = pyleo.Series(time=time, value=value, time_unit='years') ts2 = pyleo.Series(time=time, value=value, time_unit='years') ms = pyleo.MultipleSeries([ts1, ts2]) new_ms = ms.convert_time_unit('yr BP') print('Original timeseries:') print('time unit:', ms.time_unit) print() print('Converted timeseries:') print('time unit:', new_ms.time_unit) ''' new_ms = self.copy() new_ts_list = [] for ts in self.series_list: new_ts = ts.convert_time_unit(time_unit=time_unit) new_ts_list.append(new_ts) new_ms.time_unit = time_unit new_ms.series_list = new_ts_list return new_ms def filter(self, cutoff_freq=None, cutoff_scale=None, method='butterworth', **kwargs): ''' Filtering the timeseries in the MultipleSeries object Parameters ---------- method : str, {'savitzky-golay', 'butterworth', 'firwin'} the filtering method - 'butterworth': the Butterworth method (default) - 'savitzky-golay': the Savitzky-Golay method - 'firwin': FIR filter design using the window method, with default window as Hamming - 'lanczos': lowpass filter via Lanczos resampling cutoff_freq : float or list The cutoff frequency only works with the Butterworth method. If a float, it is interpreted as a low-frequency cutoff (lowpass). If a list, it is interpreted as a frequency band (f1, f2), with f1 < f2 (bandpass). cutoff_scale : float or list cutoff_freq = 1 / cutoff_scale The cutoff scale only works with the Butterworth method and when cutoff_freq is None. If a float, it is interpreted as a low-frequency (high-scale) cutoff (lowpass). If a list, it is interpreted as a frequency band (f1, f2), with f1 < f2 (bandpass). 
kwargs : dict a dictionary of the keyword arguments for the filtering method, see `pyleoclim.utils.filter.savitzky_golay`, `pyleoclim.utils.filter.butterworth`, and `pyleoclim.utils.filter.firwin` for the details Returns ------- ms : pyleoclim.MultipleSeries See also -------- pyleoclim.utils.filter.butterworth : Butterworth method pyleoclim.utils.filter.savitzky_golay : Savitzky-Golay method pyleoclim.utils.filter.firwin : FIR filter design using the window method pyleoclim.utils.filter.lanczos : lowpass filter via Lanczos resampling ''' ms = self.copy() new_tslist = [] for ts in self.series_list: new_tslist.append(ts.filter(cutoff_freq=cutoff_freq, cutoff_scale=cutoff_scale, method=method, **kwargs)) ms.series_list = new_tslist return ms def append(self,ts): '''Append timeseries ts to MultipleSeries object Returns ------- ms : pyleoclim.MultipleSeries The augmented object, comprising the old one plus `ts` ''' ms = self.copy() ts_list = deepcopy(ms.series_list) ts_list.append(ts) ms = MultipleSeries(ts_list) return ms def copy(self): '''Copy the object ''' return deepcopy(self) def standardize(self): '''Standardize each series object in a collection Returns ------- ms : pyleoclim.MultipleSeries The standardized Series ''' ms=self.copy() for idx,item in enumerate(ms.series_list): s=item.copy() v_mod=tsutils.standardize(item.value)[0] s.value=v_mod ms.series_list[idx]=s return ms def grid_properties(self, step_style='median'): ''' Extract grid properties (start, stop, step) of all the Series objects in a collection. Parameters ---------- step_style : str Method to obtain a representative step if x is not evenly spaced. Valid entries: 'median' [default], 'mean', 'mode' or 'max' The mode is the most frequent entry in a dataset, and may be a good choice if the timeseries is nearly equally spaced but for a few gaps. 
Max is a conservative choice, appropriate for binning methods and Gaussian kernel coarse-graining Returns ------- grid_properties : numpy array n x 3 array, where n is the number of series ''' gp = np.empty((len(self.series_list),3)) # obtain grid parameters for idx,item in enumerate(self.series_list): item = item.clean(verbose=idx==0) gp[idx,:] = tsutils.grid_properties(item.time, step_style=step_style) return gp def common_time(self, method='interp', common_step = 'max', start=None, stop = None, step=None, step_style = None, **kwargs): ''' Aligns the time axes of a MultipleSeries object, via binning interpolation., or Gaussian kernel. Alignmentis critical for workflows that need to assume a common time axis for the group of series under consideration. The common time axis is characterized by the following parameters: start : the latest start date of the bunch (maximun of the minima) stop : the earliest stop date of the bunch (minimum of the maxima) step : The representative spacing between consecutive values Optional arguments for binning or interpolation are those of the underling functions. If the time axis are retrograde, this step makes them prograde. Parameters ---------- method : string either 'bin', 'interp' [default] or 'gkernel' common_step : string Method to obtain a representative step among all Series Valid entries: 'median' [default], 'mean', 'mode' or 'max' start : float starting point of the common time axis [default = None] stop : float end point of the common time axis [default = None] step : float increment the common time axis [default = None] if not provided, `pyleoclim` will use `grid_properties()` to determine these parameters step_style : 'string' step style to be applied from `grid_properties` [default = None] kwargs: dict keyword arguments (dictionary) of the various methods Returns ------- ms : pyleoclim.MultipleSeries The MultipleSeries objects with all series aligned to the same time axis. 
See also -------- pyleoclim.utils.tsutils.bin : put timeseries values into bins of equal size (possibly leaving NaNs in). pyleoclim.utils.tsutils.gkernel : coarse-graining using a Gaussian kernel pyleoclim.utils.tsutils.interp : interpolation onto a regular grid (default = linear interpolation) pyleoclim.utils.tsutils.grid_properties : infer grid properties Examples -------- .. ipython:: python :okwarning: :okexcept: import numpy as np import pyleoclim as pyleo import matplotlib.pyplot as plt from pyleoclim.utils.tsmodel import colored_noise # create 2 incompletely sampled series ns = 2 ; nt = 200; n_del = 20 serieslist = [] for j in range(ns): t = np.arange(nt) v = colored_noise(alpha=1, t=t) deleted_idx = np.random.choice(range(np.size(t)), n_del, replace=False) tu = np.delete(t, deleted_idx) vu = np.delete(v, deleted_idx) ts = pyleo.Series(time = tu, value = vu, label = 'series ' + str(j+1)) serieslist.append(ts) # create MS object from the list ms = pyleo.MultipleSeries(serieslist) fig, ax = plt.subplots(2,2) ax = ax.flatten() # apply common_time with default parameters msc = ms.common_time() msc.plot(title='linear interpolation',ax=ax[0]) # apply common_time with binning msc = ms.common_time(method='bin') msc.plot(title='Binning',ax=ax[1], legend=False) # apply common_time with gkernel msc = ms.common_time(method='gkernel') msc.plot(title=r'Gaussian kernel ($h=3$)',ax=ax[2],legend=False) # apply common_time with gkernel and a large bandwidth msc = ms.common_time(method='gkernel', h=11) msc.plot(title=r'Gaussian kernel ($h=11$)',ax=ax[3],legend=False) # display, save and close figure fig.tight_layout() @savefig ms_ct.png pyleo.showfig(fig) pyleo.closefig(fig) ''' if step_style == None: if method == 'bin' or method == 'gkernel': step_style = 'max' elif method == 'interp': step_style = 'mean' gp = self.grid_properties(step_style=step_style) # define parameters for common time axis start = gp[:,0].max() stop = gp[:,1].min() if start > stop: raise ValueError('At 
least one series has no common time interval with others. Please check the time axis of the series.') if step is None: if common_step == 'mean': step = gp[:,2].mean() elif common_step == 'max': step = gp[:,2].max() elif common_step == 'mode': step = stats.mode(gp[:,2])[0][0] else: step = np.median(gp[:,2]) ms = self.copy() if method == 'bin': for idx,item in enumerate(self.series_list): ts = item.copy() d = tsutils.bin(ts.time, ts.value, bin_size=step, start=start, stop=stop, evenly_spaced = False, **kwargs) ts.time = d['bins'] ts.value = d['binned_values'] ms.series_list[idx] = ts elif method == 'interp': for idx,item in enumerate(self.series_list): ts = item.copy() ti, vi = tsutils.interp(ts.time, ts.value, step=step, start=start, stop=stop,**kwargs) ts.time = ti ts.value = vi ms.series_list[idx] = ts elif method == 'gkernel': for idx,item in enumerate(self.series_list): ts = item.copy() ti, vi = tsutils.gkernel(ts.time,ts.value,step=step, start=start, stop=stop, **kwargs) ts.time = ti ts.value = vi ms.series_list[idx] = ts else: raise NameError('Unknown methods; no action taken') return ms def correlation(self, target=None, timespan=None, alpha=0.05, settings=None, fdr_kwargs=None, common_time_kwargs=None, mute_pbar=False, seed=None): ''' Calculate the correlation between a MultipleSeries and a target Series If the target Series is not specified, then the 1st member of MultipleSeries will be the target Parameters ---------- target : pyleoclim.Series, optional A pyleoclim Series object. 
timespan : tuple The time interval over which to perform the calculation alpha : float The significance level (0.05 by default) fdr_kwargs : dict Parameters for the FDR function settings : dict Parameters for the correlation function, including: nsim : int the number of simulations (default: 1000) method : str, {'ttest','isopersistent','isospectral' (default)} method for significance testing common_time_kwargs : dict Parameters for the method MultipleSeries.common_time() seed : float or int random seed for isopersistent and isospectral methods mute_pbar : bool If True, the progressbar will be muted. Default is False. Returns ------- corr : pyleoclim.ui.CorrEns the result object, see `pyleoclim.ui.CorrEns` See also -------- pyleoclim.utils.correlation.corr_sig : Correlation function pyleoclim.utils.correlation.fdr : FDR function pyleoclim.ui.CorrEns : the correlation ensemble object Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo from pyleoclim.utils.tsmodel import colored_noise import numpy as np nt = 100 t0 = np.arange(nt) v0 = colored_noise(alpha=1, t=t0) noise = np.random.normal(loc=0, scale=1, size=nt) ts0 = pyleo.Series(time=t0, value=v0) ts1 = pyleo.Series(time=t0, value=v0+noise) ts2 = pyleo.Series(time=t0, value=v0+2*noise) ts3 = pyleo.Series(time=t0, value=v0+1/2*noise) ts_list = [ts1, ts2, ts3] ms = pyleo.MultipleSeries(ts_list) ts_target = ts0 # set an arbitrary randome seed to fix the result corr_res = ms.correlation(ts_target, settings={'nsim': 20}, seed=2333) print(corr_res) # set an arbitrary randome seed to fix the result corr_res = ms.correlation(settings={'nsim': 20}, seed=2333) print(corr_res) ''' r_list = [] signif_list = [] p_list = [] if target is None: target = self.series_list[0] for idx, ts in tqdm(enumerate(self.series_list), total=len(self.series_list), disable=mute_pbar): corr_res = ts.correlation(target, timespan=timespan, alpha=alpha, settings=settings, common_time_kwargs=common_time_kwargs, 
seed=seed) r_list.append(corr_res.r) signif_list.append(corr_res.signif) p_list.append(corr_res.p) r_list = np.array(r_list) signif_fdr_list = [] fdr_kwargs = {} if fdr_kwargs is None else fdr_kwargs.copy() args = {} args.update(fdr_kwargs) for i in range(np.size(signif_list)): signif_fdr_list.append(False) fdr_res = corrutils.fdr(p_list, **fdr_kwargs) if fdr_res is not None: for i in fdr_res: signif_fdr_list[i] = True corr_ens = CorrEns(r_list, p_list, signif_list, signif_fdr_list, alpha) return corr_ens # def mssa(self, M, MC=0, f=0.5): # data = [] # for val in self.series_list: # data.append(val.value) # data = np.transpose(np.asarray(data)) # res = decomposition.mssa(data, M=M, MC=MC, f=f) # return res def equal_lengths(self): ''' Test whether all series in object have equal length Parameters ---------- None Returns ------- flag : boolean lengths : list containing the lengths of the series in object ''' lengths = [] for ts in self.series_list: lengths.append(len(ts.value)) L = lengths[0] r = lengths[1:] flag = all (l==L for l in r) return flag, lengths def pca(self,weights=None,missing='fill-em',tol_em=5e-03, max_em_iter=100,**pca_kwargs): '''Principal Component Analysis (Empirical Orthogonal Functions) Decomposition of dataset ys in terms of orthogonal basis functions. Tolerant to missing values, infilled by an EM algorithm. Requires ncomp to be less than the number of missing values. Do make sure the time axes are aligned, however! (e.g. use `common_time()`) Algorithm from statsmodels: https://www.statsmodels.org/stable/generated/statsmodels.multivariate.pca.PCA.html Parameters ---------- weights : ndarray, optional Series weights to use after transforming data according to standardize or demean when computing the principal components. missing : {str, None} Method for missing data. Choices are: * 'drop-row' - drop rows with missing values. * 'drop-col' - drop columns with missing values. * 'drop-min' - drop either rows or columns, choosing by data retention. 
* 'fill-em' - use EM algorithm to fill missing value. ncomp should be set to the number of factors required. * `None` raises if data contains NaN values. tol_em : float Tolerance to use when checking for convergence of the EM algorithm. max_em_iter : int Maximum iterations for the EM algorithm. Attributes ---------- res: pyleoclim.ui.SpatialDecomp the result object, see `pyleoclim.ui.SpatialDecomp` Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) tslist = data.to_LipdSeriesList() tslist = tslist[2:] # drop the first two series which only concerns age and depth ms = pyleo.MultipleSeries(tslist).common_time() res = ms.pca() # carry out PCA res.screeplot() # plot the eigenvalue spectrum res.modeplot() # plot the first mode ''' flag, lengths = self.equal_lengths() if flag==False: print('All Time Series should be of same length. 
Apply common_time() first') else: # if all series have equal length p = len(lengths) n = lengths[0] ys = np.empty((n,p)) for j in range(p): ys[:,j] = self.series_list[j].value # fill in data matrix nc = min(ys.shape) # number of components to return out = PCA(ys,weights=weights,missing=missing,tol_em=tol_em, max_em_iter=max_em_iter,**pca_kwargs) # compute effective sample size PC1 = out.factors[:,0] neff = tsutils.eff_sample_size(PC1) # compute percent variance pctvar = out.eigenvals**2/np.sum(out.eigenvals**2)*100 # assign result to SpatiamDecomp class # Note: need to grab coordinates from Series or LiPDSeries res = SpatialDecomp(name='PCA', time = self.series_list[0].time, neff= neff, pcs = out.scores, pctvar = pctvar, locs = None, eigvals = out.eigenvals, eigvecs = out.eigenvecs) return res # def mcpca(self,nMC=200,**pca_kwargs): # ''' Monte Carlo Principal Component Analysis # (UNDER REPAIR) # Parameters # ---------- # nMC : int # number of Monte Carlo simulations # pca_kwargs : tuple # Returns # ------- # res : dictionary containing: # - eigval : eigenvalues (nrec,) # - eig_ar1 : eigenvalues of the AR(1) ensemble (nrec, nMC) # - pcs : PC series of all components (nrec, nt) # - eofs : EOFs of all components (nrec, nrec) # References: # ---------- # Deininger, M., McDermott, F., Mudelsee, M. et al. (2017): Coherency of late Holocene # European speleothem δ18O records linked to North Atlantic Ocean circulation. # Climate Dynamics, 49, 595–618. https://doi.org/10.1007/s00382-016-3360-8 # See also # -------- # pyleoclim.utils.decomposition.mcpca: Monte Carlo PCA # Examples # -------- # .. 
ipython:: python # :okwarning: # import pyleoclim as pyleo # url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' # data = pyleo.Lipd(usr_path = url) # tslist = data.to_LipdSeriesList() # tslist = tslist[2:] # drop the first two series which only concerns age and depth # ms = pyleo.MultipleSeries(tslist) # # msc = ms.common_time() # # res = msc.pca(nMC=20) # ''' # flag, lengths = self.equal_lengths() # if flag==False: # print('All Time Series should be of same length. Apply common_time() first') # else: # if all series have equal length # p = len(lengths) # n = lengths[0] # ys = np.empty((n,p)) # for j in range(p): # ys[:,j] = self.series_list[j].value # res = decomposition.mcpca(ys, nMC, **pca_kwargs) # return res def bin(self, **kwargs): '''Aligns the time axes of a MultipleSeries object, via binning. This is critical for workflows that need to assume a common time axis for the group of series under consideration. The common time axis is characterized by the following parameters: start : the latest start date of the bunch (maximin of the minima) stop : the earliest stop date of the bunch (minimum of the maxima) step : The representative spacing between consecutive values (mean of the median spacings) This is a special case of the common_time function. Parameters ---------- kwargs : dict Arguments for the binning function. See pyleoclim.utils.tsutils.bin Returns ------- ms : pyleoclim.MultipleSeries The MultipleSeries objects with all series aligned to the same time axis. See also -------- pyleoclim.core.ui.MultipleSeries.common_time: Base function on which this operates pyleoclim.utils.tsutils.bin: Underlying binning function pyleoclim.core.ui.Series.bin: Bin function for Series object Examples -------- .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) tslist = data.to_LipdSeriesList() tslist = tslist[2:] # drop the first two series which only concerns age and depth ms = pyleo.MultipleSeries(tslist) msbin = ms.bin() ''' ms = self.copy() ms = ms.common_time(method = 'bin', **kwargs) return ms def gkernel(self, **kwargs): ''' Aligns the time axes of a MultipleSeries object, via Gaussian kernel. This is critical for workflows that need to assume a common time axis for the group of series under consideration. The common time axis is characterized by the following parameters: start : the latest start date of the bunch (maximin of the minima) stop : the earliest stop date of the bunch (minimum of the maxima) step : The representative spacing between consecutive values (mean of the median spacings) This is a special case of the common_time function. Parameters ---------- kwargs : dict Arguments for gkernel. See pyleoclim.utils.tsutils.gkernel for details. Returns ------- ms : pyleoclim.MultipleSeries The MultipleSeries objects with all series aligned to the same time axis. See also -------- pyleoclim.core.ui.MultipleSeries.common_time: Base function on which this operates pyleoclim.utils.tsutils.gkernel: Underlying kernel module Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) tslist = data.to_LipdSeriesList() tslist = tslist[2:] # drop the first two series which only concerns age and depth ms = pyleo.MultipleSeries(tslist) msk = ms.gkernel() ''' ms = self.copy() ms = ms.common_time(method = 'gkernel', **kwargs) return ms def interp(self, **kwargs): ''' Aligns the time axes of a MultipleSeries object, via interpolation. 
This is critical for workflows that need to assume a common time axis for the group of series under consideration. The common time axis is characterized by the following parameters: start : the latest start date of the bunch (maximin of the minima) stop : the earliest stop date of the bunch (minimum of the maxima) step : The representative spacing between consecutive values (mean of the median spacings) This is a special case of the common_time function. Parameters ---------- kwargs: keyword arguments (dictionary) for the interpolation method Returns ------- ms : pyleoclim.MultipleSeries The MultipleSeries objects with all series aligned to the same time axis. See also -------- pyleoclim.core.ui.MultipleSeries.common_time: Base function on which this operates pyleoclim.utils.tsutils.interp: Underlying interpolation function pyleoclim.core.ui.Series.interp: Interpolation function for Series object Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) tslist = data.to_LipdSeriesList() tslist = tslist[2:] # drop the first two series which only concerns age and depth ms = pyleo.MultipleSeries(tslist) msinterp = ms.interp() ''' ms = self.copy() ms = ms.common_time(method='interp', **kwargs) return ms def detrend(self,method='emd',**kwargs): '''Detrend timeseries Parameters ---------- method : str, optional The method for detrending. The default is 'emd'. Options include: * linear: the result of a linear least-squares fit to y is subtracted from y. * constant: only the mean of data is subtrated. * "savitzky-golay", y is filtered using the Savitzky-Golay filters and the resulting filtered series is subtracted from y. * "emd" (default): Empirical mode decomposition. The last mode is assumed to be the trend and removed from the series **kwargs : dict Relevant arguments for each of the methods. 
Returns ------- ms : pyleoclim.MultipleSeries The detrended timeseries See also -------- pyleoclim.core.ui.Series.detrend : Detrending for a single series pyleoclim.utils.tsutils.detrend : Detrending function ''' ms=self.copy() for idx,item in enumerate(ms.series_list): s=item.copy() v_mod=tsutils.detrend(item.value,x=item.time,method=method,**kwargs) s.value=v_mod ms.series_list[idx]=s return ms def spectral(self, method='lomb_scargle', settings=None, mute_pbar=False, freq_method='log', freq_kwargs=None, label=None, verbose=False, scalogram_list=None): ''' Perform spectral analysis on the timeseries Parameters ---------- method : str {'wwz', 'mtm', 'lomb_scargle', 'welch', 'periodogram'} freq_method : str {'log','scale', 'nfft', 'lomb_scargle', 'welch'} freq_kwargs : dict Arguments for frequency vector settings : dict Arguments for the specific spectral method label : str Label for the PSD object verbose : bool If True, will print warning messages if there is any mute_pbar : {True, False} Mute the progress bar. Default is False. 
scalogram_list : pyleoclim.MultipleScalogram object, optional Multiple scalogram object containing pre-computed scalograms to use when calculating spectra, only works with wwz Returns ------- psd : pyleoclim.MultiplePSD A Multiple PSD object See also -------- pyleoclim.utils.spectral.mtm : Spectral analysis using the Multitaper approach pyleoclim.utils.spectral.lomb_scargle : Spectral analysis using the Lomb-Scargle method pyleoclim.utils.spectral.welch: Spectral analysis using the Welch segement approach pyleoclim.utils.spectral.periodogram: Spectral anaysis using the basic Fourier transform pyleoclim.utils.spectral.wwz_psd : Spectral analysis using the Wavelet Weighted Z transform pyleoclim.utils.wavelet.make_freq_vector : Functions to create the frequency vector pyleoclim.utils.tsutils.detrend : Detrending function pyleoclim.core.ui.Series.spectral : Spectral analysis for a single timeseries pyleoclim.core.ui.PSD : PSD object pyleoclim.core.ui.MultiplePSD : Multiple PSD object ''' settings = {} if settings is None else settings.copy() psd_list = [] if method == 'wwz' and scalogram_list: scalogram_list_len = len(scalogram_list.scalogram_list) series_len = len(self.series_list) #In the case where the scalogram list and series list are the same we can re-use scalograms in a one to one fashion #OR if the scalogram list is longer than the series list we use as many scalograms from the scalogram list as we need if scalogram_list_len >= series_len: for idx, s in enumerate(tqdm(self.series_list, desc='Performing spectral analysis on individual series', position=0, leave=True, disable=mute_pbar)): psd_tmp = s.spectral(method=method, settings=settings, freq_method=freq_method, freq_kwargs=freq_kwargs, label=label, verbose=verbose,scalogram = scalogram_list.scalogram_list[idx]) psd_list.append(psd_tmp) #If the scalogram list isn't as long as the series list, we re-use all the scalograms we can and then recalculate the rest elif scalogram_list_len < series_len: for idx, s 
in enumerate(tqdm(self.series_list, desc='Performing spectral analysis on individual series', position=0, leave=True, disable=mute_pbar)): if idx < scalogram_list_len: psd_tmp = s.spectral(method=method, settings=settings, freq_method=freq_method, freq_kwargs=freq_kwargs, label=label, verbose=verbose,scalogram = scalogram_list.scalogram_list[idx]) psd_list.append(psd_tmp) else: psd_tmp = s.spectral(method=method, settings=settings, freq_method=freq_method, freq_kwargs=freq_kwargs, label=label, verbose=verbose) psd_list.append(psd_tmp) else: for s in tqdm(self.series_list, desc='Performing spectral analysis on individual series', position=0, leave=True, disable=mute_pbar): psd_tmp = s.spectral(method=method, settings=settings, freq_method=freq_method, freq_kwargs=freq_kwargs, label=label, verbose=verbose) psd_list.append(psd_tmp) psds = MultiplePSD(psd_list=psd_list) return psds def wavelet(self, method='wwz', settings={}, freq_method='log', ntau=None, freq_kwargs=None, verbose=False, mute_pbar=False): '''Wavelet analysis Parameters ---------- method : {wwz, cwt} Whether to use the wwz method for unevenly spaced timeseries or traditional cwt (from pywavelets) settings : dict, optional Settings for the particular method. The default is {}. freq_method : str {'log', 'scale', 'nfft', 'lomb_scargle', 'welch'} freq_kwargs : dict Arguments for frequency vector ntau : int The length of the time shift points that determins the temporal resolution of the result. If None, it will be either the length of the input time axis, or at most 100. settings : dict Arguments for the specific spectral method verbose : bool If True, will print warning messages if there is any mute_pbar : bool, optional Whether to mute the progress bar. The default is False. 
Returns ------- scals : pyleoclim.MultipleScalograms See also -------- pyleoclim.utils.wavelet.wwz : wwz function pyleoclim.utils.wavelet.make_freq_vector : Functions to create the frequency vector pyleoclim.utils.tsutils.detrend : Detrending function pyleoclim.core.ui.Series.wavelet : wavelet analysis on single object pyleoclim.core.ui.MultipleScalogram : Multiple Scalogram object ''' settings = {} if settings is None else settings.copy() scal_list = [] for s in tqdm(self.series_list, desc='Performing wavelet analysis on individual series', position=0, leave=True, disable=mute_pbar): scal_tmp = s.wavelet(method=method, settings=settings, freq_method=freq_method, freq_kwargs=freq_kwargs, verbose=verbose, ntau=ntau) scal_list.append(scal_tmp) scals = MultipleScalogram(scalogram_list=scal_list) return scals def plot(self, figsize=[10, 4], marker=None, markersize=None, linestyle=None, linewidth=None, colors=None, cmap='tab10', norm=None, xlabel=None, ylabel=None, title=None, legend=True, plot_kwargs=None, lgd_kwargs=None, savefig_settings=None, ax=None, mute=False, invert_xaxis=False): '''Plot multiple timeseries on the same axis Parameters ---------- figsize : list, optional Size of the figure. The default is [10, 4]. marker : str, optional marker type. The default is None. markersize : float, optional marker size. The default is None. colors : a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values) Colors for plotting. If None, the plotting will cycle the 'tab10' colormap; if only one color is specified, then all curves will be plotted with that single color; if a list of colors are specified, then the plotting will cycle that color list. cmap : str The colormap to use when "colors" is None. norm : matplotlib.colors.Normalize like The nomorlization for the colormap. If None, a linear normalization will be used. linestyle : str, optional Line style. The default is None. linewidth : float, optional The width of the line. 
The default is None. xlabel : str, optional x-axis label. The default is None. ylabel : str, optional y-axis label. The default is None. title : str, optional Title. The default is None. legend : bool, optional Wether the show the legend. The default is True. plot_kwargs : dict, optional Plot parameters. The default is None. lgd_kwargs : dict, optional Legend parameters. The default is None. savefig_settings : dictionary, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} The default is None. ax : matplotlib.ax, optional The matplotlib axis onto which to return the figure. The default is None. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) invert_xaxis : bool, optional if True, the x-axis of the plot will be inverted Returns ------- fig, ax ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() plot_kwargs = {} if plot_kwargs is None else plot_kwargs.copy() lgd_kwargs = {} if lgd_kwargs is None else lgd_kwargs.copy() if ax is None: fig, ax = plt.subplots(figsize=figsize) if ylabel is None: consistent_ylabels = True time_label, value_label = self.series_list[0].make_labels() for s in self.series_list[1:]: time_label_tmp, value_label_tmp = s.make_labels() if value_label_tmp != value_label: consistent_ylabels = False if consistent_ylabels: ylabel = value_label else: ylabel = 'value' for idx, s in enumerate(self.series_list): if colors is None: cmap_obj = plt.get_cmap(cmap) if hasattr(cmap_obj, 'colors'): nc = len(cmap_obj.colors) else: nc = len(self.series_list) if norm is None: norm = mpl.colors.Normalize(vmin=0, vmax=nc-1) clr = cmap_obj(norm(idx%nc)) elif type(colors) is str: clr = colors elif 
type(colors) is list: nc = len(colors) clr = colors[idx%nc] else: raise TypeError('"colors" should be a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values)') ax = s.plot( figsize=figsize, marker=marker, markersize=markersize, color=clr, linestyle=linestyle, linewidth=linewidth, label=s.label, xlabel=xlabel, ylabel=ylabel, title=title, legend=legend, lgd_kwargs=lgd_kwargs, plot_kwargs=plot_kwargs, ax=ax, ) if invert_xaxis: ax.invert_xaxis() if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax def stackplot(self, figsize=None, savefig_settings=None, xlim=None, fill_between_alpha=0.2, colors=None, cmap='tab10', norm=None, labels='auto', spine_lw=1.5, grid_lw=0.5, font_scale=0.8, label_x_loc=-0.15, v_shift_factor=3/4, linewidth=1.5, plot_kwargs=None, mute=False): ''' Stack plot of multiple series Note that the plotting style is uniquely designed for this one and cannot be properly reset with `pyleoclim.set_style()`. Parameters ---------- figsize : list Size of the figure. colors : a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values) Colors for plotting. If None, the plotting will cycle the 'tab10' colormap; if only one color is specified, then all curves will be plotted with that single color; if a list of colors are specified, then the plotting will cycle that color list. cmap : str The colormap to use when "colors" is None. norm : matplotlib.colors.Normalize like The nomorlization for the colormap. If None, a linear normalization will be used. labels: None, 'auto' or list If None, doesn't add labels to the subplots If 'auto', uses the labels passed during the creation of pyleoclim.Series If list, pass a list of strings for each labels. 
Default is 'auto' savefig_settings : dictionary the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} The default is None. xlim : list The x-axis limit. fill_between_alpha : float The transparency for the fill_between shades. spine_lw : float The linewidth for the spines of the axes. grid_lw : float The linewidth for the gridlines. linewidth : float The linewidth for the curves. font_scale : float The scale for the font sizes. Default is 0.8. label_x_loc : float The x location for the label of each curve. v_shift_factor : float The factor for the vertical shift of each axis. The default value 3/4 means the top of the next axis will be located at 3/4 of the height of the previous one. plot_kwargs: dict or list of dict Arguments to further customize the plot from matplotlib.pyplot.plot. Dictionary: Arguments will be applied to all lines in the stackplots List of dictionary: Allows to customize one line at a time. mute : {True,False} if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) Returns ------- fig, ax Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) tslist = data.to_LipdSeriesList() tslist = tslist[2:] # drop the first two series which only concerns age and depth ms = pyleo.MultipleSeries(tslist) @savefig mts_stackplot.png fig, ax = ms.stackplot() pyleo.closefig(fig) Let's change the labels on the left .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) sst = d.to_LipdSeries(number=5) d18Osw = d.to_LipdSeries(number=3) ms = pyleo.MultipleSeries([sst,d18Osw]) @savefig mts_stackplot_customlabels.png fig, ax = ms.stackplot(labels=['sst','d18Osw']) pyleo.closefig(fig) And let's remove them completely .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) sst = d.to_LipdSeries(number=5) d18Osw = d.to_LipdSeries(number=3) ms = pyleo.MultipleSeries([sst,d18Osw]) @savefig mts_stackplot_nolabels.png fig, ax = ms.stackplot(labels=None) pyleo.closefig(fig) Now, let's add markers to the timeseries. .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) sst = d.to_LipdSeries(number=5) d18Osw = d.to_LipdSeries(number=3) ms = pyleo.MultipleSeries([sst,d18Osw]) @savefig mts_stackplot_samemarkers.png fig, ax = ms.stackplot(labels=None, plot_kwargs={'marker':'o'}) pyleo.closefig(fig) But I really want to use different markers .. 
ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) sst = d.to_LipdSeries(number=5) d18Osw = d.to_LipdSeries(number=3) ms = pyleo.MultipleSeries([sst,d18Osw]) @savefig mts_stackplot_differentmarkers.png fig, ax = ms.stackplot(labels=None, plot_kwargs=[{'marker':'o'},{'marker':'^'}]) pyleo.closefig(fig) ''' current_style = deepcopy(mpl.rcParams) plotting.set_style('journal', font_scale=font_scale) savefig_settings = {} if savefig_settings is None else savefig_settings.copy() n_ts = len(self.series_list) if type(labels)==list: if len(labels) != n_ts: raise ValueError("The length of the label list should match the number of timeseries to be plotted") # Deal with plotting arguments if type(plot_kwargs)==dict: plot_kwargs = [plot_kwargs]*n_ts if plot_kwargs is not None and len(plot_kwargs) != n_ts: raise ValueError("When passing a list of dictionaries for kwargs arguments, the number of items should be the same as the number of timeseries") fig = plt.figure(figsize=figsize) if xlim is None: time_min = np.inf time_max = -np.inf for ts in self.series_list: if np.min(ts.time) <= time_min: time_min = np.min(ts.time) if np.max(ts.time) >= time_max: time_max = np.max(ts.time) xlim = [time_min, time_max] ax = {} left = 0 width = 1 height = 1/n_ts bottom = 1 for idx, ts in enumerate(self.series_list): if colors is None: cmap_obj = plt.get_cmap(cmap) if hasattr(cmap_obj, 'colors'): nc = len(cmap_obj.colors) else: nc = len(self.series_list) if norm is None: norm = mpl.colors.Normalize(vmin=0, vmax=nc-1) clr = cmap_obj(norm(idx%nc)) elif type(colors) is str: clr = colors elif type(colors) is list: nc = len(colors) clr = colors[idx%nc] else: raise TypeError('"colors" should be a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values)') #deal with other plotting arguments if plot_kwargs is None: 
p_kwargs = {} else: p_kwargs = plot_kwargs[idx] bottom -= height*v_shift_factor ax[idx] = fig.add_axes([left, bottom, width, height]) ax[idx].plot(ts.time, ts.value, color=clr, lw=linewidth,**p_kwargs) ax[idx].patch.set_alpha(0) ax[idx].set_xlim(xlim) time_label, value_label = ts.make_labels() ax[idx].set_ylabel(value_label, weight='bold') mu = np.mean(ts.value) std = np.std(ts.value) ylim = [mu-4*std, mu+4*std] ax[idx].fill_between(ts.time, ts.value, y2=mu, alpha=fill_between_alpha, color=clr) trans = transforms.blended_transform_factory(ax[idx].transAxes, ax[idx].transData) if labels == 'auto': if ts.label is not None: ax[idx].text(label_x_loc, mu, ts.label, horizontalalignment='right', transform=trans, color=clr, weight='bold') elif type(labels) ==list: ax[idx].text(label_x_loc, mu, labels[idx], horizontalalignment='right', transform=trans, color=clr, weight='bold') elif labels==None: pass ax[idx].set_ylim(ylim) ax[idx].set_yticks(ylim) ax[idx].yaxis.set_major_formatter(FormatStrFormatter('%.1f')) ax[idx].grid(False) if idx % 2 == 0: ax[idx].spines['left'].set_visible(True) ax[idx].spines['left'].set_linewidth(spine_lw) ax[idx].spines['left'].set_color(clr) ax[idx].spines['right'].set_visible(False) ax[idx].yaxis.set_label_position('left') ax[idx].yaxis.tick_left() else: ax[idx].spines['left'].set_visible(False) ax[idx].spines['right'].set_visible(True) ax[idx].spines['right'].set_linewidth(spine_lw) ax[idx].spines['right'].set_color(clr) ax[idx].yaxis.set_label_position('right') ax[idx].yaxis.tick_right() ax[idx].yaxis.label.set_color(clr) ax[idx].tick_params(axis='y', colors=clr) ax[idx].spines['top'].set_visible(False) ax[idx].spines['bottom'].set_visible(False) ax[idx].tick_params(axis='x', which='both', length=0) ax[idx].set_xlabel('') ax[idx].set_xticklabels([]) xt = ax[idx].get_xticks()[1:-1] for x in xt: ax[idx].axvline(x=x, color='lightgray', linewidth=grid_lw, ls='-', zorder=-1) ax[idx].axhline(y=mu, color='lightgray', linewidth=grid_lw, ls='-', 
zorder=-1) bottom -= height*(1-v_shift_factor) ax[n_ts] = fig.add_axes([left, bottom, width, height]) ax[n_ts].set_xlabel(time_label) ax[n_ts].spines['left'].set_visible(False) ax[n_ts].spines['right'].set_visible(False) ax[n_ts].spines['bottom'].set_visible(True) ax[n_ts].spines['bottom'].set_linewidth(spine_lw) ax[n_ts].set_yticks([]) ax[n_ts].patch.set_alpha(0) ax[n_ts].set_xlim(xlim) ax[n_ts].grid(False) ax[n_ts].tick_params(axis='x', which='both', length=3.5) xt = ax[n_ts].get_xticks()[1:-1] for x in xt: ax[n_ts].axvline(x=x, color='lightgray', linewidth=grid_lw, ls='-', zorder=-1) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) # reset the plotting style mpl.rcParams.update(current_style) return fig, ax else: plotting.showfig(fig) # reset the plotting style mpl.rcParams.update(current_style) return ax class SurrogateSeries(MultipleSeries): ''' Object containing surrogate timeseries, usually obtained through recursive modeling (e.g., AR1) Surrogate Series is a child of MultipleSeries. All methods available for MultipleSeries are available for surrogate series. ''' def __init__(self, series_list, surrogate_method=None, surrogate_args=None): self.series_list = series_list self.surrogate_method = surrogate_method self.surrogate_args = surrogate_args class EnsembleSeries(MultipleSeries): ''' EnsembleSeries object The EnsembleSeries object is a child of the MultipleSeries object, that is, a special case of MultipleSeries, aiming for ensembles of similar series. Ensembles usually arise from age modeling or Bayesian calibrations. All members of an EnsembleSeries object are assumed to share identical labels and units. All methods available for MultipleSeries are available for EnsembleSeries. Some functions were modified for the special case of ensembles. 
''' def __init__(self, series_list): self.series_list = series_list def make_labels(self): ''' Initialization of labels Returns ------- time_header : str Label for the time axis value_header : str Label for the value axis ''' ts_list = self.series_list if ts_list[0].time_name is not None: time_name_str = ts_list[0].time_name else: time_name_str = 'time' if ts_list[0].value_name is not None: value_name_str = ts_list[0].value_name else: value_name_str = 'value' if ts_list[0].value_unit is not None: value_header = f'{value_name_str} [{ts_list[0].value_unit}]' else: value_header = f'{value_name_str}' if ts_list[0].time_unit is not None: time_header = f'{time_name_str} [{ts_list[0].time_unit}]' else: time_header = f'{time_name_str}' return time_header, value_header def quantiles(self, qs=[0.05, 0.5, 0.95]): '''Calculate quantiles of an EnsembleSeries object Parameters ---------- qs : list, optional List of quantiles to consider for the calculation. The default is [0.05, 0.5, 0.95]. Returns ------- ens_qs : pyleoclim.EnsembleSeries ''' time = np.copy(self.series_list[0].time) vals = [] for ts in self.series_list: if not np.array_equal(ts.time, time): raise ValueError('Time axis not consistent across the ensemble!') vals.append(ts.value) vals = np.array(vals) ens_qs = mquantiles(vals, qs, axis=0) ts_list = [] for i, quant in enumerate(ens_qs): ts = Series(time=time, value=quant, label=f'{qs[i]*100:g}%') ts_list.append(ts) ens_qs = EnsembleSeries(series_list=ts_list) return ens_qs def correlation(self, target=None, timespan=None, alpha=0.05, settings=None, fdr_kwargs=None, common_time_kwargs=None, mute_pbar=False, seed=None): ''' Calculate the correlation between an EnsembleSeries object to a target. If the target is not specified, then the 1st member of the ensemble will be the target Note that the FDR approach is applied by default to determine the significance of the p-values (more information in See Also below). 
Parameters ---------- target : pyleoclim.Series or pyleoclim.EnsembleSeries, optional A pyleoclim Series object or EnsembleSeries object. When the target is also an EnsembleSeries object, then the calculation of correlation is performed in a one-to-one sense, and the ourput list of correlation values and p-values will be the size of the series_list of the self object. That is, if the self object contains n Series, and the target contains n+m Series, then only the first n Series from the object will be used for the calculation; otherwise, if the target contains only n-m Series, then the first m Series in the target will be used twice in sequence. timespan : tuple The time interval over which to perform the calculation alpha : float The significance level (0.05 by default) settings : dict Parameters for the correlation function, including: nsim : int the number of simulations (default: 1000) method : str, {'ttest','isopersistent','isospectral' (default)} method for significance testing fdr_kwargs : dict Parameters for the FDR function common_time_kwargs : dict Parameters for the method MultipleSeries.common_time() mute_pbar : bool If True, the progressbar will be muted. Default is False. seed : float or int random seed for isopersistent and isospectral methods Returns ------- corr : pyleoclim.ui.CorrEns the result object, see `pyleoclim.ui.CorrEns` See also -------- pyleoclim.utils.correlation.corr_sig : Correlation function pyleoclim.utils.correlation.fdr : FDR function pyleoclim.ui.CorrEns : the correlation ensemble object Examples -------- .. 
ipython:: python :okwarning: import pyleoclim as pyleo import numpy as np from pyleoclim.utils.tsmodel import colored_noise nt = 100 t0 = np.arange(nt) v0 = colored_noise(alpha=1, t=t0) noise = np.random.normal(loc=0, scale=1, size=nt) ts0 = pyleo.Series(time=t0, value=v0) ts1 = pyleo.Series(time=t0, value=v0+noise) ts2 = pyleo.Series(time=t0, value=v0+2*noise) ts3 = pyleo.Series(time=t0, value=v0+1/2*noise) ts_list1 = [ts0, ts1] ts_list2 = [ts2, ts3] ts_ens = pyleo.EnsembleSeries(ts_list1) ts_target = pyleo.EnsembleSeries(ts_list2) # set an arbitrary randome seed to fix the result corr_res = ts_ens.correlation(ts_target, seed=2333) print(corr_res) ''' if target is None: target = self.series_list[0] r_list = [] p_list = [] signif_list = [] for idx, ts1 in tqdm(enumerate(self.series_list), total=len(self.series_list), disable=mute_pbar): if hasattr(target, 'series_list'): nEns = np.size(target.series_list) if idx < nEns: value2 = target.series_list[idx].value time2 = target.series_list[idx].time else: value2 = target.series_list[idx-nEns].value time2 = target.series_list[idx-nEns].time else: value2 = target.value time2 = target.time ts2 = Series(time=time2, value=value2, verbose=idx==0) corr_res = ts1.correlation(ts2, timespan=timespan, settings=settings, common_time_kwargs=common_time_kwargs, seed=seed) r_list.append(corr_res.r) signif_list.append(corr_res.signif) p_list.append(corr_res.p) r_list = np.array(r_list) p_list = np.array(p_list) signif_fdr_list = [] fdr_kwargs = {} if fdr_kwargs is None else fdr_kwargs.copy() args = {} args.update(fdr_kwargs) for i in range(np.size(signif_list)): signif_fdr_list.append(False) fdr_res = corrutils.fdr(p_list, **fdr_kwargs) if fdr_res is not None: for i in fdr_res: signif_fdr_list[i] = True corr_ens = CorrEns(r_list, p_list, signif_list, signif_fdr_list, alpha) return corr_ens def plot_traces(self, figsize=[10, 4], xlabel=None, ylabel=None, title=None, num_traces=10, seed=None, xlim=None, ylim=None, linestyle='-', 
savefig_settings=None, ax=None, plot_legend=True, color=sns.xkcd_rgb['pale red'], lw=0.5, alpha=0.3, lgd_kwargs=None, mute=False): '''Plot EnsembleSeries as a subset of traces. Parameters ---------- figsize : list, optional The figure size. The default is [10, 4]. xlabel : str, optional x-axis label. The default is None. ylabel : str, optional y-axis label. The default is None. title : str, optional Plot title. The default is None. xlim : list, optional x-axis limits. The default is None. ylim : list, optional y-axis limits. The default is None. color : str, optional Color of the traces. The default is sns.xkcd_rgb['pale red']. alpha : float, optional Transparency of the lines representing the multiple members. The default is 0.3. linestyle : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} Set the linestyle of the line lw : float, optional Width of the lines representing the multiple members. The default is 0.5. num_traces : int, optional Number of traces to plot. The default is 10. savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} The default is None. ax : matplotlib.ax, optional Matplotlib axis on which to return the plot. The default is None. plot_legend : bool, optional Whether to plot the legend. The default is True. lgd_kwargs : dict, optional Parameters for the legend. The default is None. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax. The default is False. (going to be deprecated) seed : int, optional Set the seed for the random number generator. Useful for reproducibility. The default is None. Returns ------- fig, ax Examples -------- .. 
ipython:: python :okwarning: :okexcept: nn = 30 # number of noise realizations nt = 500 series_list = [] signal = pyleo.gen_ts(model='colored_noise',nt=nt,alpha=1.0).standardize() noise = np.random.randn(nt,nn) for idx in range(nn): # noise ts = pyleo.Series(time=signal.time, value=signal.value+noise[:,idx]) series_list.append(ts) ts_ens = pyleo.EnsembleSeries(series_list) fig, ax = ts_ens.plot_traces(alpha=0.2,num_traces=8) pyleo.closefig(fig) ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() lgd_kwargs = {} if lgd_kwargs is None else lgd_kwargs.copy() # generate default axis labels time_label, value_label = self.make_labels() if xlabel is None: xlabel = time_label if ylabel is None: ylabel = value_label if ax is None: fig, ax = plt.subplots(figsize=figsize) if num_traces > 0: if seed is not None: np.random.seed(seed) nts = np.size(self.series_list) random_draw_idx = np.random.choice(nts, num_traces) for idx in random_draw_idx: self.series_list[idx].plot(xlabel=xlabel, ylabel=ylabel, zorder=99, linewidth=lw, xlim=xlim, ylim=ylim, ax=ax, color=color, alpha=alpha,linestyle='-') ax.plot(np.nan, np.nan, color=color, label=f'example members (n={num_traces})',linestyle='-') if title is not None: ax.set_title(title) if plot_legend: lgd_args = {'frameon': False} lgd_args.update(lgd_kwargs) ax.legend(**lgd_args) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax def plot_envelope(self, figsize=[10, 4], qs=[0.025, 0.25, 0.5, 0.75, 0.975], xlabel=None, ylabel=None, title=None, xlim=None, ylim=None, savefig_settings=None, ax=None, plot_legend=True, curve_clr=sns.xkcd_rgb['pale red'], curve_lw=2, shade_clr=sns.xkcd_rgb['pale red'], shade_alpha=0.2, inner_shade_label='IQR', outer_shade_label='95% CI', lgd_kwargs=None, mute=False): ''' Plot EnsembleSeries as an envelope. 
Parameters ---------- figsize : list, optional The figure size. The default is [10, 4]. qs : list, optional The significance levels to consider. The default is [0.025, 0.25, 0.5, 0.75, 0.975] (median, interquartile range, and central 95% region) xlabel : str, optional x-axis label. The default is None. ylabel : str, optional y-axis label. The default is None. title : str, optional Plot title. The default is None. xlim : list, optional x-axis limits. The default is None. ylim : list, optional y-axis limits. The default is None. savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} The default is None. ax : matplotlib.ax, optional Matplotlib axis on which to return the plot. The default is None. plot_legend : bool, optional Wether to plot the legend. The default is True. curve_clr : str, optional Color of the main line (median). The default is sns.xkcd_rgb['pale red']. curve_lw : str, optional Width of the main line (median). The default is 2. shade_clr : str, optional Color of the shaded envelope. The default is sns.xkcd_rgb['pale red']. shade_alpha : float, optional Transparency on the envelope. The default is 0.2. inner_shade_label : str, optional Label for the envelope. The default is 'IQR'. outer_shade_label : str, optional Label for the envelope. The default is '95\% CI'. lgd_kwargs : dict, optional Parameters for the legend. The default is None. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax. The default is False. (going to be deprecated) Returns ------- fig, ax Examples -------- .. 
ipython:: python :okwarning: :okexcept: nn = 30 # number of noise realizations nt = 500 series_list = [] signal = pyleo.gen_ts(model='colored_noise',nt=nt,alpha=1.0).standardize() noise = np.random.randn(nt,nn) for idx in range(nn): # noise ts = pyleo.Series(time=signal.time, value=signal.value+noise[:,idx]) series_list.append(ts) ts_ens = pyleo.EnsembleSeries(series_list) fig, ax = ts_ens.plot_envelope(curve_lw=1.5) pyleo.closefig(fig) ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() lgd_kwargs = {} if lgd_kwargs is None else lgd_kwargs.copy() # generate default axis labels time_label, value_label = self.make_labels() if xlabel is None: xlabel = time_label if ylabel is None: ylabel = value_label if ax is None: fig, ax = plt.subplots(figsize=figsize) ts_qs = self.quantiles(qs=qs) if inner_shade_label is None: inner_shade_label = f'{ts_qs.series_list[1].label}-{ts_qs.series_list[-2].label}' if outer_shade_label is None: outer_shade_label = f'{ts_qs.series_list[0].label}-{ts_qs.series_list[-1].label}' time = ts_qs.series_list[0].time # plot outer envelope ax.fill_between( time, ts_qs.series_list[0].value, ts_qs.series_list[4].value, color=shade_clr, alpha=shade_alpha, edgecolor=shade_clr, label=outer_shade_label, ) # plot inner envelope on top ax.fill_between( time, ts_qs.series_list[1].value, ts_qs.series_list[3].value, color=shade_clr, alpha=2*shade_alpha, edgecolor=shade_clr, label=inner_shade_label, ) # plot the median ts_qs.series_list[2].plot(xlabel=xlabel, ylabel=ylabel, linewidth=curve_lw, color=curve_clr, xlim=xlim, ylim=ylim, ax=ax, zorder=100, label = 'median' ) if title is not None: ax.set_title(title) if plot_legend: lgd_args = {'frameon': False} lgd_args.update(lgd_kwargs) ax.legend(**lgd_args) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax def stackplot(self, figsize=[5, 15], 
savefig_settings=None, xlim=None, fill_between_alpha=0.2, colors=None, cmap='tab10',
norm=None, spine_lw=1.5, grid_lw=0.5, font_scale=0.8, label_x_loc=-0.15,
v_shift_factor=3/4, linewidth=1.5, mute=False):
    ''' Stack plot of multiple series

    Note that the plotting style is uniquely designed for this one and cannot
    be properly reset with `pyleoclim.set_style()`.

    Parameters
    ----------
    figsize : list
        Size of the figure.
    colors : a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values)
        Colors for plotting.
        If None, the plotting will cycle the 'tab10' colormap;
        if only one color is specified, then all curves will be plotted with that single color;
        if a list of colors are specified, then the plotting will cycle that color list.
    cmap : str
        The colormap to use when "colors" is None.
    norm : matplotlib.colors.Normalize like
        The nomorlization for the colormap.
        If None, a linear normalization will be used.
    savefig_settings : dictionary
        the dictionary of arguments for plt.savefig(); some notes below:
        - "path" must be specified; it can be any existed or non-existed path,
          with or without a suffix; if the suffix is not given in "path", it will follow "format"
        - "format" can be one of {"pdf", "eps", "png", "ps"}
        The default is None.
    xlim : list
        The x-axis limit.
    fill_between_alpha : float
        The transparency for the fill_between shades.
    spine_lw : float
        The linewidth for the spines of the axes.
    grid_lw : float
        The linewidth for the gridlines.
    linewidth : float
        The linewidth for the curves.
    font_scale : float
        The scale for the font sizes. Default is 0.8.
    label_x_loc : float
        The x location for the label of each curve.
    v_shift_factor : float
        The factor for the vertical shift of each axis.
        The default value 3/4 means the top of the next axis will be located at
        3/4 of the height of the previous one.
    mute : bool
        if True, the plot will not show;
        recommend to turn on when more modifications are going to be made on ax
        (going to be deprecated)

    Returns
    -------
    fig, ax
    '''
    # Snapshot the global rcParams so the 'journal' style applied below can be
    # undone before returning (this method mutates matplotlib's global state).
    current_style = deepcopy(mpl.rcParams)
    plotting.set_style('journal', font_scale=font_scale)
    savefig_settings = {} if savefig_settings is None else savefig_settings.copy()

    n_ts = len(self.series_list)

    fig = plt.figure(figsize=figsize)

    # Default x-limits: the union of all series' time spans.
    if xlim is None:
        time_min = np.inf
        time_max = -np.inf
        for ts in self.series_list:
            if np.min(ts.time) <= time_min:
                time_min = np.min(ts.time)
            if np.max(ts.time) >= time_max:
                time_max = np.max(ts.time)
        xlim = [time_min, time_max]

    # One overlapping axes per series; each occupies `height` of figure space
    # and is shifted down by v_shift_factor so the curves visually stack.
    ax = {}
    left = 0
    width = 1
    height = 1/n_ts
    bottom = 1
    for idx, ts in enumerate(self.series_list):
        # Resolve the curve color: colormap cycling, a single color, or a list.
        if colors is None:
            cmap_obj = plt.get_cmap(cmap)
            if hasattr(cmap_obj, 'colors'):
                nc = len(cmap_obj.colors)
            else:
                nc = len(self.series_list)
            if norm is None:
                norm = mpl.colors.Normalize(vmin=0, vmax=nc-1)
            clr = cmap_obj(norm(idx%nc))
        elif type(colors) is str:
            clr = colors
        elif type(colors) is list:
            nc = len(colors)
            clr = colors[idx%nc]
        else:
            raise TypeError('"colors" should be a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values)')

        bottom -= height*v_shift_factor
        ax[idx] = fig.add_axes([left, bottom, width, height])
        ax[idx].plot(ts.time, ts.value, color=clr, lw=linewidth)
        ax[idx].patch.set_alpha(0)  # transparent so overlapping axes show through
        ax[idx].set_xlim(xlim)
        time_label, value_label = ts.make_labels()
        ax[idx].set_ylabel(value_label, weight='bold')

        mu = np.mean(ts.value)
        std = np.std(ts.value)
        ylim = [mu-4*std, mu+4*std]  # symmetric 4-sigma window around the mean
        ax[idx].fill_between(ts.time, ts.value, y2=mu, alpha=fill_between_alpha, color=clr)
        trans = transforms.blended_transform_factory(ax[idx].transAxes, ax[idx].transData)
        if ts.label is not None:
            ax[idx].text(label_x_loc, mu, ts.label, horizontalalignment='right', transform=trans, color=clr, weight='bold')
        ax[idx].set_ylim(ylim)
        ax[idx].set_yticks(ylim)
        ax[idx].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        ax[idx].grid(False)
        # Alternate the y-axis (spine, ticks, label) left/right so adjacent
        # stacked curves do not collide.
        if idx % 2 == 0:
            ax[idx].spines['left'].set_visible(True)
            ax[idx].spines['left'].set_linewidth(spine_lw)
            ax[idx].spines['left'].set_color(clr)
            ax[idx].spines['right'].set_visible(False)
            ax[idx].yaxis.set_label_position('left')
            ax[idx].yaxis.tick_left()
        else:
            ax[idx].spines['left'].set_visible(False)
            ax[idx].spines['right'].set_visible(True)
            ax[idx].spines['right'].set_linewidth(spine_lw)
            ax[idx].spines['right'].set_color(clr)
            ax[idx].yaxis.set_label_position('right')
            ax[idx].yaxis.tick_right()
        ax[idx].yaxis.label.set_color(clr)
        ax[idx].tick_params(axis='y', colors=clr)
        ax[idx].spines['top'].set_visible(False)
        ax[idx].spines['bottom'].set_visible(False)
        ax[idx].tick_params(axis='x', which='both', length=0)
        ax[idx].set_xlabel('')
        ax[idx].set_xticklabels([])
        # Hand-drawn gridlines (grid() is off to keep per-axes styling).
        xt = ax[idx].get_xticks()[1:-1]
        for x in xt:
            ax[idx].axvline(x=x, color='lightgray', linewidth=grid_lw, ls='-', zorder=-1)
        ax[idx].axhline(y=mu, color='lightgray', linewidth=grid_lw, ls='-', zorder=-1)

    # Final, transparent axes holding only the shared time axis.
    bottom -= height*(1-v_shift_factor)
    ax[n_ts] = fig.add_axes([left, bottom, width, height])
    ax[n_ts].set_xlabel(time_label)
    ax[n_ts].spines['left'].set_visible(False)
    ax[n_ts].spines['right'].set_visible(False)
    ax[n_ts].spines['bottom'].set_visible(True)
    ax[n_ts].spines['bottom'].set_linewidth(spine_lw)
    ax[n_ts].set_yticks([])
    ax[n_ts].patch.set_alpha(0)
    ax[n_ts].set_xlim(xlim)
    ax[n_ts].grid(False)
    ax[n_ts].tick_params(axis='x', which='both', length=3.5)
    xt = ax[n_ts].get_xticks()[1:-1]
    for x in xt:
        ax[n_ts].axvline(x=x, color='lightgray', linewidth=grid_lw, ls='-', zorder=-1)

    if 'fig' in locals():
        if 'path' in savefig_settings:
            plotting.savefig(fig, settings=savefig_settings)
        # else:
        #     if not mute:
        #         plotting.showfig(fig)
        # reset the plotting style
        mpl.rcParams.update(current_style)
        return fig, ax
    else:
        # reset the plotting style
        mpl.rcParams.update(current_style)
        return ax

def distplot(self, figsize=[10, 4], title=None, savefig_settings=None, ax=None,
             ylabel='KDE', vertical=False, edgecolor='w', mute=False,
**plot_kwargs): """ Plots the distribution of the timeseries across ensembles Parameters ---------- figsize : list, optional The size of the figure. The default is [10, 4]. title : str, optional Title for the figure. The default is None. savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"}. The default is None. ax : matplotlib.axis, optional A matplotlib axis. The default is None. ylabel : str, optional Label for the count axis. The default is 'KDE'. vertical : {True,False}, optional Whether to flip the plot vertically. The default is False. edgecolor : matplotlib.color, optional The color of the edges of the bar. The default is 'w'. mute : {True,False}, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax. The default is False. (going to be deprecated) **plot_kwargs : dict Plotting arguments for seaborn histplot: https://seaborn.pydata.org/generated/seaborn.histplot.html. 
See also -------- pyleoclim.utils.plotting.savefig : saving figure in Pyleoclim """ savefig_settings = {} if savefig_settings is None else savefig_settings.copy() if ax is None: fig, ax = plt.subplots(figsize=figsize) #make the data into a dataframe so we can flip the figure time_label, value_label = self.make_labels() #append all the values together for the plot for item in self.series_list: try: val=np.append(val,item.value) except: val=item.value if vertical == True: data=pd.DataFrame({'value':val}) ax = sns.histplot(data=data, y="value", ax=ax, kde=True, edgecolor=edgecolor, **plot_kwargs) ax.set_ylabel(value_label) ax.set_xlabel(ylabel) else: ax = sns.histplot(val, ax=ax, kde=True, edgecolor=edgecolor, **plot_kwargs) ax.set_xlabel(value_label) ax.set_ylabel(ylabel) if title is not None: ax.set_title(title) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax class MultiplePSD: ''' Object for multiple PSD. Used for significance level ''' def __init__(self, psd_list, beta_est_res=None): self.psd_list = psd_list self.beta_est_res = beta_est_res def copy(self): '''Copy object ''' return deepcopy(self) def quantiles(self, qs=[0.05, 0.5, 0.95], lw=[0.5, 1.5, 0.5]): '''Calculate quantiles Parameters ---------- qs : list, optional List of quantiles to consider for the calculation. The default is [0.05, 0.5, 0.95]. lw : list, optional Linewidth to use for plotting each level. Should be the same length as qs. The default is [0.5, 1.5, 0.5]. Raises ------ ValueError Frequency axis not consistent across the PSD list! 
Returns ------- psds : pyleoclim.MultiplePSD ''' if self.psd_list[0].timeseries is not None: period_unit = self.psd_list[0].timeseries.time_unit freq = np.copy(self.psd_list[0].frequency) amps = [] for psd in self.psd_list: if not np.array_equal(psd.frequency, freq): raise ValueError('Frequency axis not consistent across the PSD list!') amps.append(psd.amplitude) amps = np.array(amps) amp_qs = mquantiles(amps, qs, axis=0) psd_list = [] for i, amp in enumerate(amp_qs): psd_tmp = PSD(frequency=freq, amplitude=amp, label=f'{qs[i]*100:g}%', plot_kwargs={'color': 'gray', 'linewidth': lw[i]}, period_unit=period_unit) psd_list.append(psd_tmp) psds = MultiplePSD(psd_list=psd_list) return psds def beta_est(self, fmin=None, fmax=None, logf_binning_step='max', verbose=False): ''' Estimate the scaling factor beta of the each PSD from the psd_list in a log-log space Parameters ---------- fmin : float the minimum frequency edge for beta estimation; the default is the minimum of the frequency vector of the PSD obj fmax : float the maximum frequency edge for beta estimation; the default is the maximum of the frequency vector of the PSD obj logf_binning_step : str, {'max', 'first'} if 'max', then the maximum spacing of log(f) will be used as the binning step if 'first', then the 1st spacing of log(f) will be used as the binning step verbose : bool If True, will print warning messages if there is any Returns ------- new : pyleoclim.MultiplePSD New MultiplePSD object with the estimated scaling slope information, which is stored as a dictionary that includes: - beta: the scaling factor - std_err: the one standard deviation error of the scaling factor - f_binned: the binned frequency series, used as X for linear regression - psd_binned: the binned PSD series, used as Y for linear regression - Y_reg: the predicted Y from linear regression, used with f_binned for the slope curve plotting See also -------- pyleoclim.core.ui.PSD.beta_est : beta estimation for on a single PSD object ''' 
res_dict = {} res_dict['beta'] = [] res_dict['std_err'] = [] res_dict['f_binned'] = [] res_dict['psd_binned'] = [] res_dict['Y_reg'] = [] psd_beta_list = [] for psd_obj in self.psd_list: psd_beta = psd_obj.beta_est(fmin=fmin, fmax=fmax, logf_binning_step=logf_binning_step, verbose=verbose) psd_beta_list.append(psd_beta) res = psd_beta.beta_est_res for k in res_dict.keys(): res_dict[k].append(res[k]) new = self.copy() new.beta_est_res = res_dict new.psd_list = psd_beta_list return new def plot(self, figsize=[10, 4], in_loglog=True, in_period=True, xlabel=None, ylabel='Amplitude', title=None, xlim=None, ylim=None, savefig_settings=None, ax=None, xticks=None, yticks=None, legend=True, colors=None, cmap=None, norm=None, plot_kwargs=None, lgd_kwargs=None, mute=False): '''Plot multiple PSD on the same plot Parameters ---------- figsize : list, optional Figure size. The default is [10, 4]. in_loglog : bool, optional Whether to plot in loglog. The default is True. in_period : bool, optional Plots against periods instead of frequencies. The default is True. xlabel : str, optional x-axis label. The default is None. ylabel : str, optional y-axis label. The default is 'Amplitude'. title : str, optional Title for the figure. The default is None. xlim : list, optional Limits for the x-axis. The default is None. ylim : list, optional limits for the y-axis. The default is None. colors : a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values) Colors for plotting. If None, the plotting will cycle the 'tab10' colormap; if only one color is specified, then all curves will be plotted with that single color; if a list of colors are specified, then the plotting will cycle that color list. cmap : str The colormap to use when "colors" is None. norm : matplotlib.colors.Normalize like The nomorlization for the colormap. If None, a linear normalization will be used. 
savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} ax : matplotlib axis, optional The matplotlib axis object on which to retrun the figure. The default is None. xticks : list, optional x-ticks label. The default is None. yticks : list, optional y-ticks label. The default is None. legend : bool, optional Whether to plot the legend. The default is True. plot_kwargs : dictionary, optional Parameters for plot function. The default is None. lgd_kwargs : dictionary, optional Parameters for legend. The default is None. mute : bool, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax The default is False. (going to be deprecated) Returns ------- fig, ax ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() plot_kwargs = {} if plot_kwargs is None else plot_kwargs.copy() lgd_kwargs = {} if lgd_kwargs is None else lgd_kwargs.copy() if ax is None: fig, ax = plt.subplots(figsize=figsize) for idx, psd in enumerate(self.psd_list): tmp_plot_kwargs = {} if psd.plot_kwargs is not None: tmp_plot_kwargs.update(psd.plot_kwargs) tmp_plot_kwargs.update(plot_kwargs) # get color for each psd curve use_clr = False if 'color' not in tmp_plot_kwargs and 'c' not in 'tmp_plot_kwargs': use_clr = True if 'color' in tmp_plot_kwargs and tmp_plot_kwargs['color'] is None: use_clr = True if 'c' in tmp_plot_kwargs and tmp_plot_kwargs['c'] is None: use_clr = True if colors is not None or cmap is not None: use_clr = True if use_clr: # use the color based on the argument 'colors' or 'cmap' if colors is None: cmap = 'tab10' if cmap is None else cmap cmap_obj = plt.get_cmap(cmap) if hasattr(cmap_obj, 'colors'): nc = len(cmap_obj.colors) else: nc = 
len(self.psd_list) if norm is None: norm = mpl.colors.Normalize(vmin=0, vmax=nc-1) clr = cmap_obj(norm(idx%nc)) elif type(colors) is str: clr = colors elif type(colors) is list: nc = len(colors) clr = colors[idx%nc] else: raise TypeError('"colors" should be a list of, or one, Python supported color code (a string of hex code or a tuple of rgba values)') tmp_plot_kwargs.update({'color': clr}) ax = psd.plot( figsize=figsize, in_loglog=in_loglog, in_period=in_period, xlabel=xlabel, ylabel=ylabel, title=title, xlim=xlim, ylim=ylim, savefig_settings=savefig_settings, ax=ax, xticks=xticks, yticks=yticks, legend=legend, plot_kwargs=tmp_plot_kwargs, lgd_kwargs=lgd_kwargs, ) if title is not None: ax.set_title(title) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax def plot_envelope(self, figsize=[10, 4], qs=[0.025, 0.5, 0.975], in_loglog=True, in_period=True, xlabel=None, ylabel='Amplitude', title=None, xlim=None, ylim=None, savefig_settings=None, ax=None, xticks=None, yticks=None, plot_legend=True, curve_clr=sns.xkcd_rgb['pale red'], curve_lw=3, shade_clr=sns.xkcd_rgb['pale red'], shade_alpha=0.3, shade_label=None, lgd_kwargs=None, mute=False, members_plot_num=10, members_alpha=0.3, members_lw=1, seed=None): '''Plot mutiple PSD as an envelope. Parameters ---------- figsize : list, optional The figure size. The default is [10, 4]. qs : list, optional The significance levels to consider. The default is [0.025, 0.5, 0.975]. in_loglog : bool, optional Plot in log space. The default is True. in_period : bool, optional Whether to plot periodicity instead of frequency. The default is True. xlabel : str, optional x-axis label. The default is None. ylabel : str, optional y-axis label. The default is 'Amplitude'. title : str, optional Plot title. The default is None. xlim : list, optional x-axis limits. The default is None. 
        ylim : list, optional
            y-axis limits. The default is None.
        savefig_settings : dict, optional
            the dictionary of arguments for plt.savefig(); some notes below:
            - "path" must be specified; it can be any existed or non-existed path,
              with or without a suffix; if the suffix is not given in "path", it will follow "format"
            - "format" can be one of {"pdf", "eps", "png", "ps"}
            The default is None.
        ax : matplotlib.ax, optional
            Matplotlib axis on which to return the plot. The default is None.
        xticks : list, optional
            xticks label. The default is None.
        yticks : list, optional
            yticks label. The default is None.
        plot_legend : bool, optional
            Wether to plot the legend. The default is True.
        curve_clr : str, optional
            Color of the main PSD. The default is sns.xkcd_rgb['pale red'].
        curve_lw : str, optional
            Width of the main PSD line. The default is 3.
        shade_clr : str, optional
            Color of the shaded envelope. The default is sns.xkcd_rgb['pale red'].
        shade_alpha : float, optional
            Transparency on the envelope. The default is 0.3.
        shade_label : str, optional
            Label for the envelope. The default is None.
        lgd_kwargs : dict, optional
            Parameters for the legend. The default is None.
        mute : bool, optional
            if True, the plot will not show; recommend to turn on when more modifications
            are going to be made on ax. The default is False.
            (going to be deprecated)
        members_plot_num : int, optional
            Number of individual members to plot. The default is 10.
        members_alpha : float, optional
            Transparency of the lines representing the multiple members. The default is 0.3.
        members_lw : float, optional
            With of the lines representing the multiple members. The default is 1.
        seed : int, optional
            Set the seed for random number generator. Useful for reproducibility. The default is None.

        Returns
        -------
        fig, ax
        '''
        savefig_settings = {} if savefig_settings is None else savefig_settings.copy()
        lgd_kwargs = {} if lgd_kwargs is None else lgd_kwargs.copy()

        if ax is None:
            fig, ax = plt.subplots(figsize=figsize)

        # Optionally overlay a random sample of individual ensemble members in gray.
        if members_plot_num > 0:
            if seed is not None:
                np.random.seed(seed)

            npsd = np.size(self.psd_list)
            random_draw_idx = np.random.choice(npsd, members_plot_num)

            for idx in random_draw_idx:
                self.psd_list[idx].plot(
                    in_loglog=in_loglog, in_period=in_period, xlabel=xlabel, ylabel=ylabel,
                    xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks, ax=ax, color='gray',
                    alpha=members_alpha, zorder=99, linewidth=members_lw,
                )
            # Invisible point so the gray members get one legend entry.
            ax.plot(np.nan, np.nan, color='gray', label=f'example members (n={members_plot_num})')

        # Median curve (psd_list[1] is the middle quantile of qs).
        psd_qs = self.quantiles(qs=qs)
        psd_qs.psd_list[1].plot(
            in_loglog=in_loglog, in_period=in_period, xlabel=xlabel, ylabel=ylabel,
            linewidth=curve_lw, xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks, ax=ax,
            color=curve_clr, zorder=100
        )

        if in_period:
            x_axis = 1/psd_qs.psd_list[0].frequency
        else:
            x_axis = psd_qs.psd_list[0].frequency

        if shade_label is None:
            shade_label = f'{psd_qs.psd_list[0].label}-{psd_qs.psd_list[-1].label}'

        # Shade between the lowest and highest requested quantiles.
        ax.fill_between(
            x_axis, psd_qs.psd_list[0].amplitude, psd_qs.psd_list[-1].amplitude,
            color=shade_clr, alpha=shade_alpha, edgecolor=shade_clr, label=shade_label,
        )

        if title is not None:
            ax.set_title(title)

        if plot_legend:
            lgd_args = {'frameon': False}
            lgd_args.update(lgd_kwargs)
            ax.legend(**lgd_args)

        # `fig` only exists when this method created the figure.
        if 'fig' in locals():
            if 'path' in savefig_settings:
                plotting.savefig(fig, settings=savefig_settings)
            # else:
            #     if not mute:
            #         plotting.showfig(fig)
            return fig, ax
        else:
            return ax


class MultipleScalogram:
    ''' Multiple Scalogram objects '''
    def __init__(self, scalogram_list):
        # scalogram_list: list of Scalogram objects sharing frequency and time axes
        self.scalogram_list = scalogram_list

    def copy(self):
        ''' Copy the object '''
        return deepcopy(self)

    def quantiles(self, qs=[0.05, 0.5, 0.95]):
        '''Calculate quantiles

        Parameters
        ----------
        qs : list, optional
            List of quantiles to consider for the calculation.
            The default is [0.05, 0.5, 0.95].

        Raises
        ------
        ValueError
            Frequency axis not consistent across the scalogram list!
        Value Error
            Time axis not consistent across the scalogram list!

        Returns
        -------
        scals : pyleoclim.MultipleScalogram
        '''
        freq = np.copy(self.scalogram_list[0].frequency)
        time = np.copy(self.scalogram_list[0].time)
        coi = np.copy(self.scalogram_list[0].coi)
        amps = []
        for scal in self.scalogram_list:
            if not np.array_equal(scal.frequency, freq):
                raise ValueError('Frequency axis not consistent across the scalogram list!')
            if not np.array_equal(scal.time, time):
                raise ValueError('Time axis not consistent across the scalogram list!')
            amps.append(scal.amplitude)

        amps = np.array(amps)
        # ne: ensemble size; nf: number of frequencies; nt: number of time points
        ne, nf, nt = np.shape(amps)
        amp_qs = np.ndarray(shape=(np.size(qs), nf, nt))

        # Quantiles are taken point-wise over the ensemble axis.
        for i in range(nf):
            for j in range(nt):
                amp_qs[:,i,j] = mquantiles(amps[:,i,j], qs)

        scal_list = []
        for i, amp in enumerate(amp_qs):
            scal_tmp = Scalogram(frequency=freq, time=time, amplitude=amp, coi=coi,
                                 label=f'{qs[i]*100:g}%')
            scal_list.append(scal_tmp)

        scals = MultipleScalogram(scalogram_list=scal_list)
        return scals


class Corr:
    ''' The object for correlation result in order to format the print message

    Parameters
    ----------
    r: float
        the correlation coefficient
    p: float
        the p-value
    p_fmt_td: float
        the threshold for p-value formating (0.01 by default, i.e., if p<0.01, will print "< 0.01" instead of "0")
    p_fmt_style: str
        the style for p-value formating (exponential notation by default)
    signif: bool
        the significance
    alpha : float
        The significance level (0.05 by default)

    See also
    --------
    pyleoclim.utils.correlation.corr_sig : Correlation function
    pyleoclim.utils.correlation.fdr : FDR function
    '''
    def __init__(self, r, p, signif, alpha, p_fmt_td=0.01, p_fmt_style='exp'):
        self.r = r
        self.p = p
        self.p_fmt_td = p_fmt_td
        self.p_fmt_style = p_fmt_style
        self.signif = signif
        self.alpha = alpha

    def __str__(self):
        ''' Prints out the correlation results '''
        formatted_p = pval_format(self.p, threshold=self.p_fmt_td,
style=self.p_fmt_style) table = { 'correlation': [self.r], 'p-value': [formatted_p], f'signif. (α: {self.alpha})': [self.signif], } msg = print(tabulate(table, headers='keys')) return '' class CorrEns: ''' Correlation Ensemble Parameters ---------- r: list the list of correlation coefficients p: list the list of p-values p_fmt_td: float the threshold for p-value formating (0.01 by default, i.e., if p<0.01, will print "< 0.01" instead of "0") p_fmt_style: str the style for p-value formating (exponential notation by default) signif: list the list of significance without FDR signif_fdr: list the list of significance with FDR signif_fdr: list the list of significance with FDR alpha : float The significance level See also -------- pyleoclim.utils.correlation.corr_sig : Correlation function pyleoclim.utils.correlation.fdr : FDR function ''' def __init__(self, r, p, signif, signif_fdr, alpha, p_fmt_td=0.01, p_fmt_style='exp'): self.r = r self.p = p self.p_fmt_td = p_fmt_td self.p_fmt_style = p_fmt_style self.signif = signif self.signif_fdr = signif_fdr self.alpha = alpha def __str__(self): ''' Prints out the correlation results ''' pi_list = [] for pi in self.p: pi_list.append(pval_format(pi, threshold=self.p_fmt_td, style=self.p_fmt_style)) table = { 'correlation': self.r, 'p-value': pi_list, f'signif. w/o FDR (α: {self.alpha})': self.signif, f'signif. w/ FDR (α: {self.alpha})': self.signif_fdr, } msg = print(tabulate(table, headers='keys')) return f'Ensemble size: {len(self.r)}' def plot(self, figsize=[4, 4], title=None, ax=None, savefig_settings=None, hist_kwargs=None, title_kwargs=None, xlim=None, clr_insignif=sns.xkcd_rgb['grey'], clr_signif=sns.xkcd_rgb['teal'], clr_signif_fdr=sns.xkcd_rgb['pale orange'], clr_percentile=sns.xkcd_rgb['salmon'], rwidth=0.8, bins=None, vrange=None, mute=False): ''' Plot the correlation ensembles Parameters ---------- figsize : list, optional The figure size. The default is [4, 4]. title : str, optional Plot title. The default is None. 
savefig_settings : dict the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} hist_kwargs : dict the keyword arguments for ax.hist() title_kwargs : dict the keyword arguments for ax.set_title() ax : matplotlib.axis, optional the axis object from matplotlib See [matplotlib.axes](https://matplotlib.org/api/axes_api.html) for details. mute : {True,False} if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) xlim : list, optional x-axis limits. The default is None. See Also -------- matplotlib.pyplot.hist: https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.hist.html ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() hist_kwargs = {} if hist_kwargs is None else hist_kwargs.copy() if ax is None: fig, ax = plt.subplots(figsize=figsize) if vrange is None: vrange = [np.min(self.r), np.max(self.r)] clr_list = [clr_insignif, clr_signif, clr_signif_fdr] args = {'rwidth': rwidth, 'bins': bins, 'range': vrange, 'color': clr_list} args.update(hist_kwargs) # insignif_args.update(hist_kwargs) r_insignif = np.array(self.r)[~np.array(self.signif)] r_signif = np.array(self.r)[self.signif] r_signif_fdr = np.array(self.r)[self.signif_fdr] r_stack = [r_insignif, r_signif, r_signif_fdr] ax.hist(r_stack, stacked=True, **args) ax.legend([f'p ≥ {self.alpha}', f'p < {self.alpha} (w/o FDR)', f'p < {self.alpha} (w/ FDR)'], loc='upper left', bbox_to_anchor=(1.1, 1), ncol=1) frac_signif = np.size(r_signif) / np.size(self.r) frac_signif_fdr = np.size(r_signif_fdr) / np.size(self.r) ax.text(x=1.1, y=0.5, s=f'Fraction significant: {frac_signif*100:.1f}%', transform=ax.transAxes, fontsize=10, color=clr_signif) ax.text(x=1.1, y=0.4, s=f'Fraction significant: 
{frac_signif_fdr*100:.1f}%', transform=ax.transAxes, fontsize=10, color=clr_signif_fdr) r_pcts = np.percentile(self.r, [2.5, 25, 50, 75, 97.5]) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) for r_pct, pt, ls in zip(r_pcts, np.array([2.5, 25, 50, 75, 97.5])/100, [':', '--', '-', '--', ':']): ax.axvline(x=r_pct, linestyle=ls, color=clr_percentile) ax.text(x=r_pct, y=1.02, s=pt, color=clr_percentile, transform=trans, ha='center', fontsize=10) ax.set_xlabel(r'$r$') ax.set_ylabel('Count') ax.yaxis.set_major_locator(MaxNLocator(integer=True)) if xlim is not None: ax.set_xlim(xlim) if title is not None: title_kwargs = {} if title_kwargs is None else title_kwargs.copy() t_args = {'y': 1.1, 'weight': 'bold'} t_args.update(title_kwargs) ax.set_title(title, **t_args) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax # if 'path' in savefig_settings: # plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) # return fig, ax class SpatialDecomp: ''' Class to hold the results of spatial decompositions applies to : `pca()`, `mcpca()`, `mssa()` Attributes ---------- time: float the common time axis locs: float (p, 2) a p x 2 array of coordinates (latitude, longitude) for mapping the spatial patterns ("EOFs") name: str name of the dataset/analysis to use in plots eigvals: float vector of eigenvalues from the decomposition eigvecs: float array of eigenvectors from the decomposition pctvar: float array of pct variance accounted for by each mode neff: float scalar representing the effective sample size of the leading mode ''' def __init__(self, time, locs, name, eigvals, eigvecs, pctvar, pcs, neff): self.time = time self.name = name self.locs = locs self.eigvals = eigvals self.eigvecs = eigvecs self.pctvar = pctvar self.pcs = pcs self.neff = neff def screeplot(self, figsize=[6, 4], uq='N82' 
,title='scree plot', ax=None, savefig_settings=None, title_kwargs=None, xlim=[0,10], clr_eig='C0', mute=False): ''' Plot the eigenvalue spectrum with uncertainties Parameters ---------- figsize : list, optional The figure size. The default is [6, 4]. title : str, optional Plot title. The default is 'scree plot'. savefig_settings : dict the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} title_kwargs : dict, optional the keyword arguments for ax.set_title() ax : matplotlib.axis, optional the axis object from matplotlib See [matplotlib.axes](https://matplotlib.org/api/axes_api.html) for details. mute : {True,False} if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) xlim : list, optional x-axis limits. The default is [0, 10] (first 10 eigenvalues) uq : str, optional Method used for uncertainty quantification of the eigenvalues. 'N82' uses the North et al "rule of thumb" [1] with effective sample size computed as in [2]. 'MC' uses Monte-Carlo simulations (e.g. MC-EOF). Returns an error if no ensemble is found. clr_eig : str, optional color to be used for plotting eigenvalues References ---------- [1] North, G. R., T. L. Bell, R. F. Cahalan, and F. J. Moeng (1982), Sampling errors in the estimation of empirical orthogonal functions, Mon. Weather Rev., 110, 699–706. [2] Hannachi, A., I. T. Jolliffe, and D. B. Stephenson (2007), Empirical orthogonal functions and related techniques in atmospheric science: A review, International Journal of Climatology, 27(9), 1119–1152, doi:10.1002/joc.1499. 
''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() if ax is None: fig, ax = plt.subplots(figsize=figsize) if self.neff < 2: self.neff = 2 # compute 95% CI if uq == 'N82': eb_lbl = r'95% CI ($n_\mathrm{eff} = $'+ '{:.1f}'.format(self.neff) +')' # declare method Lc = self.eigvals # central estimate Lerr = np.tile(Lc,(2,1)) # declare array Lerr[0,:] = Lc*np.sqrt(1-np.sqrt(2/self.neff)) Lerr[1,:] = Lc*np.sqrt(1+np.sqrt(2/self.neff)) elif uq =='MC': eb_lbl = '95% CI (Monte Carlo)' # declare method try: Lq = np.quantile(self.eigvals,[0.025,0.5,0.975],axis = 1) Lc = Lq[1,:] Lerr = np.tile(Lc,(2,1)) # declare array Lerr[0,:] = Lq[0,:] Lerr[1,:] = Lq[2,:] except ValueError: print("Eigenvalue array must have more than 1 non-singleton dimension.") else: raise NameError("unknown UQ method. No action taken") idx = np.arange(len(Lc)) + 1 ax.errorbar(x=idx,y=Lc,yerr = Lerr, color=clr_eig,marker='o',ls='', alpha=1.0,label=eb_lbl) ax.set_title(title,fontweight='bold'); ax.legend(); ax.set_xlabel(r'Mode index $i$'); ax.set_ylabel(r'$\lambda_i$') ax.xaxis.set_major_locator(MaxNLocator(integer=True)) # enforce integer values if xlim is not None: ax.set_xlim(0.5,min(max(xlim),len(Lc))) if title is not None: title_kwargs = {} if title_kwargs is None else title_kwargs.copy() t_args = {'y': 1.1, 'weight': 'bold'} t_args.update(title_kwargs) ax.set_title(title, **t_args) if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax def modeplot(self, index=0, figsize=[10, 5], ax=None, savefig_settings=None, title_kwargs=None, mute=False, spec_method = 'mtm'): ''' Dashboard visualizing the properties of a given mode, including: 1. The temporal coefficient (PC or similar) 2. its spectrum 3. The spatial loadings (EOF or similar) Parameters ---------- index : int the (0-based) index of the mode to visualize. Default is 0, corresponding to the first mode. 
        figsize : list, optional
            The figure size. The default is [10, 5].
        savefig_settings : dict
            the dictionary of arguments for plt.savefig(); some notes below:
            - "path" must be specified; it can be any existed or non-existed path,
              with or without a suffix; if the suffix is not given in "path", it will follow "format"
            - "format" can be one of {"pdf", "eps", "png", "ps"}
        title_kwargs : dict
            the keyword arguments for ax.set_title()
        gs : matplotlib.gridspec object, optional
            the axis object from matplotlib
            See [matplotlib.gridspec.GridSpec](https://matplotlib.org/stable/tutorials/intermediate/gridspec.html) for details.
        mute : {True,False}
            if True, the plot will not show;
            recommend to turn on when more modifications are going to be made on ax
            (going to be deprecated)
        spec_method: str, optional
            The name of the spectral method to be applied on the PC. Default: MTM
            Note that the data are evenly-spaced, so any spectral method that
            assumes even spacing is applicable here:  'mtm', 'welch', 'periodogram'
            'wwz' is relevant if scaling exponents need to be estimated, but ill-advised otherwise, as it is very slow.
        '''
        savefig_settings = {} if savefig_settings is None else savefig_settings.copy()
        # NOTE(review): when ax is None this creates a figure that is then
        # shadowed by the plt.figure() call below, leaving a stray empty
        # figure open — looks vestigial; confirm before removing.
        if ax is None:
            fig, ax = plt.subplots(figsize=figsize)

        PC = self.pcs[:, index]

        ts = Series(time=self.time, value=PC)  # define timeseries object for the PC

        fig = plt.figure(tight_layout=True, figsize=figsize)
        gs = gridspec.GridSpec(2, 2)  # define grid for subplots
        # Top row (full width): the temporal coefficient of the mode.
        ax1 = fig.add_subplot(gs[0, :])
        ts.plot(ax=ax1)
        ax1.set_ylabel('PC '+str(index+1))
        ax1.set_title('Mode '+str(index+1)+', '+'{:3.2f}'.format(self.pctvar[index]) + '% variance explained', weight='bold')

        # plot spectrum
        ax2 = fig.add_subplot(gs[1, 0])
        psd_mtm_rc = ts.interp().spectral(method=spec_method)
        _ = psd_mtm_rc.plot(ax=ax2)
        ax2.set_xlabel('Period')
        ax2.set_title('Power spectrum ('+spec_method+')', weight='bold')

        # plot T-EOF
        ax3 = fig.add_subplot(gs[1, 1])
        #EOF = self.eigvecs[:,mode]
        ax3.set_title('Spatial loadings \n (under construction)', weight='bold')

        # if title is not None:
        #     title_kwargs = {} if title_kwargs is None else title_kwargs.copy()
        #     t_args = {'y': 1.1, 'weight': 'bold'}
        #     t_args.update(title_kwargs)
        #     ax.set_title(title, **t_args)

        if 'path' in savefig_settings:
            plotting.savefig(fig, settings=savefig_settings)
        # else:
        #     if not mute:
        #         plotting.showfig(fig)
        return fig, gs


class SsaRes:
    ''' Class to hold the results of SSA method

    Parameters
    ----------
    eigvals: float (M, 1)
        a vector of real eigenvalues derived from the signal
    pctvar: float (M, 1)
        same vector, expressed in % variance accounted for by each mode.
    eigvals_q: float (M, 2)
        array containing the 5% and 95% quantiles of the Monte-Carlo eigenvalue spectrum [ assigned NaNs if unused ]
    eigvecs : float (M, M)
        a matrix of the temporal eigenvectors (T-EOFs), i.e. the temporal patterns that explain most of the variations in the original series.
    PC : float (N - M + 1, M)
        array of principal components, i.e.
the loadings that, convolved with the T-EOFs, produce the reconstructed components, or RCs RCmat : float (N, M) array of reconstructed components, One can think of each RC as the contribution of each mode to the timeseries, weighted by their eigenvalue (loosely speaking, their "amplitude"). Summing over all columns of RC recovers the original series. (synthesis, the reciprocal operation of analysis). mode_idx: list index of retained modes RCseries : float (N, 1) reconstructed series based on the RCs of mode_idx (scaled to original series; mean must be added after the fact) See also -------- pyleoclim.utils.decomposition.ssa : Singular Spectrum Analysis ''' def __init__(self, time, original, name, eigvals, eigvecs, pctvar, PC, RCmat, RCseries,mode_idx, eigvals_q=None): self.time = time self.original = original self.name = name self.eigvals = eigvals self.eigvals_q = eigvals_q self.eigvecs = eigvecs self.pctvar = pctvar self.PC = PC self.RCseries = RCseries self.RCmat = RCmat self.mode_idx = mode_idx def screeplot(self, figsize=[6, 4], title='SSA scree plot', ax=None, savefig_settings=None, title_kwargs=None, xlim=None, clr_mcssa=sns.xkcd_rgb['red'], clr_signif=sns.xkcd_rgb['teal'], clr_eig='black', mute=False): ''' Scree plot for SSA, visualizing the eigenvalue spectrum and indicating which modes were retained. Parameters ---------- figsize : list, optional The figure size. The default is [6, 4]. title : str, optional Plot title. The default is 'SSA scree plot'. savefig_settings : dict the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} title_kwargs : dict the keyword arguments for ax.set_title() ax : matplotlib.axis, optional the axis object from matplotlib See [matplotlib.axes](https://matplotlib.org/api/axes_api.html) for details. 
mute : {True,False} if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) xlim : list, optional x-axis limits. The default is None. clr_mcssa : str, optional color of the Monte Carlo SSA AR(1) shading (if data are provided) default: red clr_eig : str, optional color of the eigenvalues, default: black clr_signif: str, optional color of the highlights for significant eigenvalue. default: teal ''' savefig_settings = {} if savefig_settings is None else savefig_settings.copy() if ax is None: fig, ax = plt.subplots(figsize=figsize) v = self.eigvals n = self.PC.shape[0] #sample size dv = v*np.sqrt(2/(n-1)) idx = np.arange(len(v))+1 if self.eigvals_q is not None: plt.fill_between(idx,self.eigvals_q[:,0],self.eigvals_q[:,1], color=clr_mcssa, alpha = 0.3, label='AR(1) 5-95% quantiles') plt.errorbar(x=idx,y=v,yerr = dv, color=clr_eig,marker='o',ls='',alpha=1.0,label=self.name) plt.plot(idx[self.mode_idx],v[self.mode_idx],color=clr_signif,marker='o',ls='', markersize=4, label='modes retained',zorder=10) plt.title(title,fontweight='bold'); plt.legend() plt.xlabel(r'Mode index $i$'); plt.ylabel(r'$\lambda_i$') ax.xaxis.set_major_locator(MaxNLocator(integer=True)) # enforce integer values if xlim is not None: ax.set_xlim(0.5,min(max(xlim),len(v))) if title is not None: title_kwargs = {} if title_kwargs is None else title_kwargs.copy() t_args = {'y': 1.1, 'weight': 'bold'} t_args.update(title_kwargs) ax.set_title(title, **t_args) if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax def modeplot(self, index=0, figsize=[10, 5], ax=None, savefig_settings=None, title_kwargs=None, mute=False, spec_method = 'mtm', plot_original=False): ''' Dashboard visualizing the properties of a given SSA mode, including: 1. the analyzing function (T-EOF) 2. the reconstructed component (RC) 3. 
its spectrum Parameters ---------- index : int the (0-based) index of the mode to visualize. Default is 0, corresponding to the first mode. figsize : list, optional The figure size. The default is [10, 5]. savefig_settings : dict the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"} title_kwargs : dict the keyword arguments for ax.set_title() ax : matplotlib.axis, optional the axis object from matplotlib See [matplotlib.axes](https://matplotlib.org/api/axes_api.html) for details. mute : {True,False} if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax (going to be deprecated) spec_method: str, optional The name of the spectral method to be applied on the PC. Default: MTM Note that the data are evenly-spaced, so any spectral method that assumes even spacing is applicable here: 'mtm', 'welch', 'periodogram' 'wwz' is relevant too if scaling exponents need to be estimated. 
        '''
        savefig_settings = {} if savefig_settings is None else savefig_settings.copy()
        if ax is None:
            fig, ax = plt.subplots(figsize=figsize)
        # NOTE(review): the figure created just above is immediately replaced by
        # plt.figure() below (leaked), and a caller-supplied `ax` is overwritten —
        # confirm intent.

        RC = self.RCmat[:,index]

        fig = plt.figure(tight_layout=True,figsize=figsize)
        gs = gridspec.GridSpec(2, 2)

        # plot RC
        ax = fig.add_subplot(gs[0, :])
        ax.plot(self.time,RC,label='mode '+str(index+1),zorder=99)
        if plot_original:
            ax.plot(self.time,self.original,color='Silver',lw=1,label='original')
            ax.legend()
        ax.set_xlabel('Time'), ax.set_ylabel('RC')
        ax.set_title('SSA Mode '+str(index+1)+' RC, '+ '{:3.2f}'.format(self.pctvar[index]) + '% variance explained',weight='bold')

        # plot T-EOF
        ax = fig.add_subplot(gs[1, 0])
        ax.plot(self.eigvecs[:,index])
        ax.set_title('Analyzing function')
        ax.set_xlabel('Time'), ax.set_ylabel('T-EOF')

        # plot spectrum
        ax = fig.add_subplot(gs[1, 1])
        ts_rc = Series(time=self.time, value=RC) # define timeseries object for the RC
        psd_mtm_rc = ts_rc.interp().spectral(method=spec_method)
        _ = psd_mtm_rc.plot(ax=ax)
        ax.set_xlabel('Period')
        ax.set_title('Spectrum ('+spec_method+')')

        if 'path' in savefig_settings:
            plotting.savefig(fig, settings=savefig_settings)
        # else:
        #     if not mute:
        #         plotting.showfig(fig)
        return fig, ax


class Lipd:
    '''Create a Lipd object from Lipd Files

    Parameters
    ----------

    usr_path : str
        path to the Lipd file(s). Can be URL (LiPD utilities only support loading
        one file at a time from a URL)
        If it's a URL, it must start with "http", "https", or "ftp".

    lipd_dict : dict
        LiPD files already loaded into Python through the LiPD utilities

    validate : bool
        Validate the LiPD files upon loading. Note that for a large library this
        can take up to half an hour.

    remove : bool
        If validate is True and remove is True, ignores non-valid Lipd files.
        Note that loading unvalidated Lipd files may result in errors for some
        functionalities but not all.

    TODO
    ----

    Support querying the LinkedEarth platform

    Examples
    --------

    .. ipython:: python
        :okwarning:
        :okexcept:

        import pyleoclim as pyleo
        url='http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004'
        d=pyleo.Lipd(usr_path=url)
    '''
    def __init__(self, usr_path=None, lipd_dict=None, validate=False, remove=False):
        # default marker color/shape per archive type, keyed by normalized archiveType
        self.plot_default = {'ice-other': ['#FFD600','h'],
                             'ice/rock': ['#FFD600', 'h'],
                             'coral': ['#FF8B00','o'],
                             'documents':['k','p'],
                             'glacierice':['#86CDFA', 'd'],
                             'hybrid': ['#00BEFF','*'],
                             'lakesediment': ['#4169E0','s'],
                             'marinesediment': ['#8A4513', 's'],
                             'sclerosponge' : ['r','o'],
                             'speleothem' : ['#FF1492','d'],
                             'wood' : ['#32CC32','^'],
                             'molluskshells' : ['#FFD600','h'],
                             'peat' : ['#2F4F4F','*'],
                             'midden' : ['#824E2B','o'],
                             'other':['k','o']}

        # removal only makes sense after validation, so force it on
        if validate==False and remove==True:
            print('Removal of unvalidated LiPD files require validation')
            validate=True

        #prepare the dictionaries for all possible scenarios
        if usr_path!=None:
            # since readLipd() takes only absolute path and it will change the current working directory (CWD) without turning back,
            # we need to record CWD manually and turn back after the data loading is finished
            cwd = os.getcwd()
            if usr_path[:4] == 'http' or usr_path[:3] == 'ftp':
                # URL
                D_path = lpd.readLipd(usr_path)
            else:
                # local path
                abs_path = os.path.abspath(usr_path)
                D_path = lpd.readLipd(abs_path)
            os.chdir(cwd)

            #make sure that it's more than one
            # (a single dataset comes back flat, with 'archiveType' at top level;
            # wrap it so the dict is always {dataSetName: dataset})
            if 'archiveType' in D_path.keys():
                D_path={D_path['dataSetName']:D_path}

            if validate==True:
                cwd = os.getcwd()
                res=lpd.validate(D_path,detailed=False)
                os.chdir(cwd)
                if remove == True:
                    for item in res:
                        if item['status'] == 'FAIL':
                            c=item['feedback']['errMsgs']
                            check = []
                            for i in c:
                                if i.startswith('Mismatched columns'):
                                    check.append(1)
                                else:
                                    check.append(0)
                            # drop the dataset only if at least one failure was NOT
                            # a 'Mismatched columns' message
                            if 0 in check:
                                del D_path[item['filename'].strip('.lpd')]
        else:
            D_path={}

        if lipd_dict!=None:
            D_dict=lipd_dict
            if 'archiveType' in D_dict.keys():
                D_dict={D_dict['dataSetName']:D_dict}
            if validate==True:
                cwd = os.getcwd()
                res=lpd.validate(D_dict,detailed=False)
                os.chdir(cwd)
                if remove == True:
                    for item in res:
                        if item['status'] == 'FAIL':
                            c=item['feedback']['errMsgs']
                            check = []
                            for i in c:
                                if i.startswith('Mismatched columns'):
                                    check.append(1)
                                else:
                                    check.append(0)
                            if 0 in check:
                                del D_dict[item['filename'].strip('.lpd')]
        else:
            D_dict={}

        # raise an error if empty
        # NOTE(review): `not bool(D_path) == True` parses as `not (bool(D_path) == True)`;
        # the condition fires when both dicts are empty, but the `== True` is redundant
        # and the precedence is easy to misread — confirm.
        if not bool(D_dict) and not bool(D_path) == True:
            raise ValueError('No valid files; try without validation.')

        #assemble
        self.lipd={}
        self.lipd.update(D_path)
        self.lipd.update(D_dict)

    def __repr__(self):
        return str(self.__dict__)

    def copy(self):
        '''Copy the object
        '''
        return deepcopy(self)

    def to_tso(self):
        '''Extracts all the timeseries objects to a list of LiPD tso

        Returns
        -------
        ts_list : list
            List of Lipd timeseries objects as defined by LiPD utilities

        See also
        --------
        pyleoclim.ui.LipdSeries : LiPD Series object.
        '''
        # extractTs changes CWD as a side effect; save/restore it
        cwd = os.getcwd()
        ts_list=lpd.extractTs(self.__dict__['lipd'])
        os.chdir(cwd)
        return ts_list

    def extract(self,dataSetName):
        '''
        Parameters
        ----------
        dataSetName : str
            Extract a particular dataset

        Returns
        -------
        new : pyleoclim.Lipd
            A new object corresponding to a particular dataset
        '''
        new = self.copy()
        try:
            dict_out=self.__dict__['lipd'][dataSetName]
            new.lipd=dict_out
        except:
            # unknown dataSetName: silently return an unmodified copy
            pass
        return new

    def to_LipdSeriesList(self, mode='paleo'):
        '''Extracts all LiPD timeseries objects to a list of LipdSeries objects

        Parameters
        ----------
        mode : {'paleo','chron'}
            Whether to extract the timeseries information from the paleo tables or
            chron tables

        Returns
        -------
        res : list
            A list of LiPDSeries objects

        See also
        --------
        pyleoclim.ui.LipdSeries : LipdSeries object
        '''
        cwd = os.getcwd()
        ts_list=lpd.extractTs(self.__dict__['lipd'], mode=mode)
        os.chdir(cwd)
        res=[]
        for idx, item in enumerate(ts_list):
            try:
                res.append(LipdSeries(item))
            except:
                # best-effort: warn and skip entries that cannot be coerced
                if mode == 'paleo':
                    txt = 'The timeseries from ' + str(idx) + ': ' +\
                        item['dataSetName'] + ': ' + \
                        item['paleoData_variableName'] + \
                        ' could not be coerced into a LipdSeries object, passing'
                else:
                    txt = 'The timeseries from ' + str(idx) + ': ' +\
                        item['dataSetName'] + ': ' + \
                        item['chronData_variableName'] + \
                        ' could not be coerced into a LipdSeries object, passing'
                warnings.warn(txt)
                pass
        return res

    def to_LipdSeries(self, number = None, mode = 'paleo'):
        '''Extracts one timeseries from the Lipd object

        Note that this function may require user interaction.

        Parameters
        ----------
        number : int
            the number of the timeseries object

        mode : {'paleo','chron'}
            whether to extract the paleo or chron series.

        Returns
        -------
        ts : pyleoclim.LipdSeries
            A LipdSeries object

        See also
        --------
        pyleoclim.ui.LipdSeries : LipdSeries object
        '''
        cwd = os.getcwd()
        ts_list = lpd.extractTs(self.__dict__['lipd'], mode=mode)
        os.chdir(cwd)
        if number is None:
            # interactive selection inside LipdSeries when given the full list
            ts = LipdSeries(ts_list)
        else:
            try:
                number = int(number)
            except:
                raise TypeError('Number needs to be an integer or should be coerced into an integer.')
            ts = LipdSeries(ts_list[number])
        return ts

    def mapAllArchive(self, projection = 'Robinson', proj_default = True,
                      background = True,borders = False, rivers = False, lakes = False,
                      figsize = None, ax = None, marker=None, color=None,
                      markersize = None, scatter_kwargs=None,
                      legend=True, lgd_kwargs=None, savefig_settings=None, mute=False):
        '''Map the records contained in LiPD files by archive type

        Parameters
        ----------
        projection : str, optional
            The projection to use. The default is 'Robinson'.
        proj_default : bool, optional
            Whether to use the Pyleoclim defaults for each projection type.
            The default is True.
        background : bool, optional
            Whether to use a background. The default is True.
        borders : bool, optional
            Draw borders. The default is False.
        rivers : bool, optional
            Draw rivers. The default is False.
        lakes : bool, optional
            Draw lakes. The default is False.
        figsize : list, optional
            The size of the figure. The default is None.
        ax : matplotlib.ax, optional
            The matplotlib axis onto which to return the map. The default is None.
        marker : str, optional
            The marker type for each archive. The default is None. Uses plot_default
        color : str, optional
            Color for each archive. The default is None. Uses plot_default
        markersize : float, optional
            Size of the marker. The default is None.
        scatter_kwargs : dict, optional
            Parameters for the scatter plot. The default is None.
        legend : bool, optional
            Whether to plot the legend. The default is True.
        lgd_kwargs : dict, optional
            Arguments for the legend. The default is None.
        savefig_settings : dictionary, optional
            the dictionary of arguments for plt.savefig(); some notes below:
            - "path" must be specified; it can be any existed or non-existed path,
              with or without a suffix; if the suffix is not given in "path",
              it will follow "format"
            - "format" can be one of {"pdf", "eps", "png", "ps"}.
            The default is None.
        mute : bool, optional
            if True, the plot will not show; recommend to turn on when more
            modifications are going to be made on ax. The default is False.
            (going to be deprecated)

        Returns
        -------
        res : figure
            The figure

        See also
        --------
        pyleoclim.utils.mapping.map_all : Underlying mapping function for Pyleoclim

        Examples
        --------

        For speed, we are only using one LiPD file. But these functions can load
        and map multiple.

        .. ipython:: python
            :okwarning:
            :okexcept:

            import pyleoclim as pyleo
            url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004'
            data = pyleo.Lipd(usr_path = url)
            @savefig mapallarchive.png
            fig, ax = data.mapAllArchive()
            pyleo.closefig(fig)

        Change the markersize

        ..
        ipython:: python
            :okwarning:
            :okexcept:

            import pyleoclim as pyleo
            url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004'
            data = pyleo.Lipd(usr_path = url)
            @savefig mapallarchive_marker.png
            fig, ax = data.mapAllArchive(markersize=100)
            pyleo.closefig(fig)
        '''
        scatter_kwargs = {} if scatter_kwargs is None else scatter_kwargs.copy()

        #get the information from the LiPD dict
        lat=[]
        lon=[]
        archiveType=[]
        for idx, key in enumerate(self.lipd):
            d = self.lipd[key]
            # GeoJSON convention: coordinates are [lon, lat]
            lat.append(d['geo']['geometry']['coordinates'][1])
            lon.append(d['geo']['geometry']['coordinates'][0])
            if 'archiveType' in d.keys():
                archiveType.append(lipdutils.LipdToOntology(d['archiveType']).lower().replace(" ",""))
            else:
                archiveType.append('other')

        # make sure criteria is in the plot_default list
        for idx,val in enumerate(archiveType):
            if val not in self.plot_default.keys():
                archiveType[idx] = 'other'

        if markersize is not None:
            scatter_kwargs.update({'s': markersize})

        # fall back to the per-archive defaults when marker/color are not given
        if marker==None:
            marker=[]
            for item in archiveType:
                marker.append(self.plot_default[item][1])

        if color==None:
            color=[]
            for item in archiveType:
                color.append(self.plot_default[item][0])

        res = mapping.map_all(lat=lat, lon=lon, criteria=archiveType,
                              marker=marker, color =color,
                              projection = projection, proj_default = proj_default,
                              background = background,borders = borders,
                              rivers = rivers, lakes = lakes,
                              figsize = figsize, ax = ax,
                              scatter_kwargs=scatter_kwargs, legend=legend,
                              lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings,
                              mute=mute)

        return res


class LipdSeries(Series):
    '''Lipd time series object

    These objects can be obtained from a LiPD file/object either through Pyleoclim
    or the LiPD utilities. If multiple objects (i.e., a list) is given, then the
    user will be prompted to choose one timeseries.

    LipdSeries is a child of Series, therefore all the methods available for
    Series apply to LipdSeries in addition to some specific methods.

    Examples
    --------

    In this example, we will import a LiPD file and explore the various options
    to create a series object.

    First, let's look at the Lipd.to_tso option. This method is attractive because
    the object is a list of dictionaries that are easily explored in Python.

    .. ipython:: python
        :okwarning:
        :okexcept:

        import pyleoclim as pyleo
        url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004'
        data = pyleo.Lipd(usr_path = url)
        ts_list = data.to_tso()
        # Print out the dataset name and the variable name
        for item in ts_list:
            print(item['dataSetName']+': '+item['paleoData_variableName'])
        # Load the sst data into a LipdSeries. Since Python indexing starts at zero, sst has index 5.
        ts = pyleo.LipdSeries(ts_list[5])

    If you attempt to pass the full list of series, Pyleoclim will prompt you to
    choose a series by printing out something similar as above.
    If you already now the number of the timeseries object you're interested in,
    then you should use the following:

    .. ipython:: python
        :okwarning:
        :okexcept:

        ts1 = data.to_LipdSeries(number=5)

    If number is not specified, Pyleoclim will prompt you for the number
    automatically.

    Sometimes, one may want to create a MultipleSeries object from a collection of
    LiPD files. In this case, we recommend using the following:

    .. ipython:: python
        :okwarning:
        :okexcept:

        ts_list = data.to_LipdSeriesList()
        # only keep the Mg/Ca and SST
        ts_list=ts_list[4:]
        #create a MultipleSeries object
        ms=pyleo.MultipleSeries(ts_list)
    '''
    def __init__(self, tso, clean_ts=True, verbose=False):
        # a list triggers interactive selection of a single tso
        if type(tso) is list:
            self.lipd_ts=lipdutils.getTs(tso)
        else:
            self.lipd_ts=tso

        # default marker color/shape per archive type (same table as Lipd)
        self.plot_default = {'ice-other': ['#FFD600','h'],
                             'ice/rock': ['#FFD600', 'h'],
                             'coral': ['#FF8B00','o'],
                             'documents':['k','p'],
                             'glacierice':['#86CDFA', 'd'],
                             'hybrid': ['#00BEFF','*'],
                             'lakesediment': ['#4169E0','s'],
                             'marinesediment': ['#8A4513', 's'],
                             'sclerosponge' : ['r','o'],
                             'speleothem' : ['#FF1492','d'],
                             'wood' : ['#32CC32','^'],
                             'molluskshells' : ['#FFD600','h'],
                             'peat' : ['#2F4F4F','*'],
                             'midden' : ['#824E2B','o'],
                             'other':['k','o']}

        try:
            time, label= lipdutils.checkTimeAxis(self.lipd_ts)
            if label=='age':
                time_name='Age'
                if 'ageUnits' in self.lipd_ts.keys():
                    time_unit=self.lipd_ts['ageUnits']
                else:
                    time_unit=None
            elif label=='year':
                time_name='Year'
                if 'yearUnits' in self.lipd_ts.keys():
                    time_unit=self.lipd_ts['yearUnits']
                else:
                    time_unit=None
            # NOTE(review): if `label` is neither 'age' nor 'year', time_name and
            # time_unit are left unbound and the NameError is swallowed by the
            # outer except into a misleading KeyError — confirm.

            try:
                if self.lipd_ts['mode'] == 'paleoData':
                    value=np.array(self.lipd_ts['paleoData_values'],dtype='float64')
                    value_name=self.lipd_ts['paleoData_variableName']
                    if 'paleoData_units' in self.lipd_ts.keys():
                        value_unit=self.lipd_ts['paleoData_units']
                    else:
                        value_unit=None
                    label=self.lipd_ts['dataSetName']
                    super(LipdSeries,self).__init__(time=time,value=value,time_name=time_name,
                          time_unit=time_unit,value_name=value_name,value_unit=value_unit,
                          label=label,clean_ts=clean_ts,verbose=verbose)
                elif self.lipd_ts['mode'] == 'chronData':
                    value=np.array(self.lipd_ts['chronData_values'],dtype='float64')
                    value_name=self.lipd_ts['chronData_variableName']
                    # NOTE(review): this checks 'paleoData_units' but reads
                    # 'chronData_units' — likely both should be 'chronData_units';
                    # confirm before changing.
                    if 'paleoData_units' in self.lipd_ts.keys():
                        value_unit=self.lipd_ts['chronData_units']
                    else:
                        value_unit=None
                    label=self.lipd_ts['dataSetName']
                    super(LipdSeries,self).__init__(time=time,value=value,time_name=time_name,
                          time_unit=time_unit,value_name=value_name,value_unit=value_unit,
                          label=label,clean_ts=clean_ts,verbose=verbose)
            except:
                raise ValueError("paleoData_values should contain floats")
        except:
            raise KeyError("No time information present")

    def copy(self):
        '''Copy the object
        '''
        return deepcopy(self)

    def chronEnsembleToPaleo(self,D,number=None,chronNumber=None, modelNumber=None,tableNumber=None):
        '''Fetch chron ensembles from a lipd object and return the ensemble as MultipleSeries

        Parameters
        ----------
        D : a LiPD object
        number: int, optional
            The number of ensemble members to store. Default is None, which
            corresponds to all present
        chronNumber: int, optional
            The chron object number. The default is None.
        modelNumber : int, optional
            Age model number. The default is None.
        tableNumber : int, optional
            Table Number. The default is None.

        Raises
        ------
        ValueError

        Returns
        -------
        ms : pyleoclim.EnsembleSeries
            An EnsembleSeries object with each series representing a possible
            realization of the age model
        '''
        #get the corresponding LiPD
        dataSetName=self.lipd_ts['dataSetName']
        if type(D) is dict:
            try:
                lipd=D[dataSetName]
            except:
                lipd=D
        else:
            # D is a pyleoclim.Lipd object
            a=D.extract(dataSetName)
            lipd=a.__dict__['lipd']

        #Look for the ensemble and get values
        cwd = os.getcwd()
        csv_dict=lpd.getCsv(lipd)
        os.chdir(cwd)
        chron,paleo = lipdutils.isEnsemble(csv_dict)
        if len(chron)==0:
            raise ValueError("No ChronMeasurementTables available")
        elif len(chron)>1:
            # several candidate tables: either ask the user or build the name
            # from the chron/model/table numbers
            if chronNumber==None or modelNumber==None or tableNumber==None:
                csvName=lipdutils.whichEnsemble(chron)
            else:
                str0='chron'+str(chronNumber)
                str1='model'+str(modelNumber)
                str2='ensemble'+str(tableNumber)
                for item in chron:
                    if str0 in item and str1 in item and str2 in item:
                        csvName=item
            depth, ensembleValues =lipdutils.getEnsemble(csv_dict,csvName)
        else:
            depth, ensembleValues =lipdutils.getEnsemble(csv_dict,chron[0])

        #make sure it's sorted
        sort_ind = np.argsort(depth)
        depth=list(np.array(depth)[sort_ind])
        ensembleValues=ensembleValues[sort_ind,:]

        if number is not None:
            if number>np.shape(ensembleValues)[1]:
                warnings.warn('Selected number of ensemble members is greater than number of members in the ensemble table; passing')
                pass
            else:
                ensembleValues=ensembleValues[:,0:number]

        #Map to paleovalues
        # find the depth vector of the paleo table (a 'depth*' key without 'Units')
        key=[]
        for item in self.lipd_ts.keys():
            if 'depth' in item and 'Units' not in item:
                key.append(item)
        key=key[0]
        ds= np.array(self.lipd_ts[key],dtype='float64')
        if 'paleoData_values' in self.lipd_ts.keys():
            ys= np.array(self.lipd_ts['paleoData_values'],dtype='float64')
        elif 'chronData_values' in self.lipd_ts.keys():
            ys= np.array(self.lipd_ts['chronData_values'],dtype='float64')
        else:
            raise KeyError('no y-axis values available')

        #Remove NaNs
        # NOTE(review): only `ds` is filtered by the NaN mask; `ys` keeps its NaN
        # entries, so the two arrays can end up with different lengths — confirm
        # whether `ys` should be filtered with the same mask.
        ys_tmp=np.copy(ys)
        ds=ds[~np.isnan(ys_tmp)]
        sort_ind2=np.argsort(ds)
        ds=np.array(ds[sort_ind2])
        ys=np.array(ys[sort_ind2])

        ensembleValuestoPaleo=lipdutils.mapAgeEnsembleToPaleoData(ensembleValues, depth, ds)

        #create multipleseries
        s_list=[]
        for i, s in enumerate(ensembleValuestoPaleo.T):
            # only the first member prints warnings (verbose=i==0)
            s_tmp = Series(time=s,value=ys,
                           verbose=i==0,
                           clean_ts=False,
                           value_name=self.value_name,
                           value_unit=self.value_unit,
                           time_name=self.time_name,
                           time_unit=self.time_unit)
            s_list.append(s_tmp)

        ens = EnsembleSeries(series_list=s_list)

        return ens

    def map(self,projection = 'Orthographic', proj_default = True,
            background = True,borders = False, rivers = False, lakes = False,
            figsize = None, ax = None, marker=None, color=None,
            markersize = None, scatter_kwargs=None,
            legend=True, lgd_kwargs=None, savefig_settings=None, mute=False):
        '''Map the location of the record

        Parameters
        ----------
        projection : str, optional
            The projection to use. The default is 'Orthographic'.
        proj_default : bool, optional
            Whether to use the Pyleoclim defaults for each projection type.
            The default is True.
        background : bool, optional
            Whether to use a background. The default is True.
        borders : bool, optional
            Draw borders. The default is False.
        rivers : bool, optional
            Draw rivers. The default is False.
        lakes : bool, optional
            Draw lakes. The default is False.
        figsize : list, optional
            The size of the figure. The default is None.
        ax : matplotlib.ax, optional
            The matplotlib axis onto which to return the map. The default is None.
        marker : str, optional
            The marker type for each archive. The default is None. Uses plot_default
        color : str, optional
            Color for each archive. The default is None. Uses plot_default
        markersize : float, optional
            Size of the marker. The default is None.
        scatter_kwargs : dict, optional
            Parameters for the scatter plot. The default is None.
        legend : bool, optional
            Whether to plot the legend. The default is True.
        lgd_kwargs : dict, optional
            Arguments for the legend. The default is None.
        savefig_settings : dictionary, optional
            the dictionary of arguments for plt.savefig(); some notes below:
            - "path" must be specified; it can be any existed or non-existed path,
              with or without a suffix; if the suffix is not given in "path",
              it will follow "format"
            - "format" can be one of {"pdf", "eps", "png", "ps"}.
            The default is None.
        mute : bool, optional
            if True, the plot will not show; recommend to turn on when more
            modifications are going to be made on ax. The default is False.
            (going to be deprecated)

        Returns
        -------
        res : fig,ax

        See also
        --------
        pyleoclim.utils.mapping.map_all : Underlying mapping function for Pyleoclim

        Examples
        --------

        .. ipython:: python
            :okwarning:
            :okexcept:

            import pyleoclim as pyleo
            url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004'
            data = pyleo.Lipd(usr_path = url)
            ts = data.to_LipdSeries(number=5)
            @savefig mapone.png
            fig, ax = ts.map()
            pyleo.closefig(fig)
        '''
        scatter_kwargs = {} if scatter_kwargs is None else scatter_kwargs.copy()

        #get the information from the timeseries
        lat=[self.lipd_ts['geo_meanLat']]
        lon=[self.lipd_ts['geo_meanLon']]

        if 'archiveType' in self.lipd_ts.keys():
            archiveType=lipdutils.LipdToOntology(self.lipd_ts['archiveType']).lower().replace(" ","")
        else:
            archiveType='other'

        # make sure criteria is in the plot_default list
        if archiveType not in self.plot_default.keys():
            archiveType = 'other'

        if markersize is not None:
            scatter_kwargs.update({'s': markersize})

        if marker==None:
            marker= self.plot_default[archiveType][1]

        if color==None:
            color=self.plot_default[archiveType][0]

        if proj_default==True:
            # candidate projection kwargs, tried in decreasing order of specificity
            # since not every cartopy projection accepts both lat and lon centers
            proj1={'central_latitude':lat[0],
                   'central_longitude':lon[0]}
            proj2={'central_latitude':lat[0]}
            proj3={'central_longitude':lon[0]}

        archiveType=[archiveType] #list so it will work with map_all
        marker=[marker]
        color=[color]

        if proj_default==True:
            try:
                res = mapping.map_all(lat=lat, lon=lon, criteria=archiveType,
                                      marker=marker, color =color,
                                      projection = projection, proj_default = proj1,
                                      background = background,borders = borders,
                                      rivers = rivers, lakes = lakes,
                                      figsize = figsize, ax = ax,
                                      scatter_kwargs=scatter_kwargs, legend=legend,
                                      lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings,
                                      mute=mute)
            except:
                try:
                    res = mapping.map_all(lat=lat, lon=lon, criteria=archiveType,
                                          marker=marker, color =color,
                                          projection = projection, proj_default = proj3,
                                          background = background,borders = borders,
                                          rivers = rivers, lakes = lakes,
                                          figsize = figsize, ax = ax,
                                          scatter_kwargs=scatter_kwargs, legend=legend,
                                          lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings,
                                          mute=mute)
                except:
                    res = mapping.map_all(lat=lat, lon=lon, criteria=archiveType,
                                          marker=marker, color =color,
                                          projection = projection, proj_default = proj2,
                                          background = background,borders = borders,
                                          rivers = rivers, lakes = lakes,
                                          figsize = figsize, ax = ax,
                                          scatter_kwargs=scatter_kwargs, legend=legend,
                                          lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings,
                                          mute=mute)
        else:
            res = mapping.map_all(lat=lat, lon=lon, criteria=archiveType,
                                  marker=marker, color =color,
                                  projection = projection, proj_default = proj_default,
                                  background = background,borders = borders,
                                  rivers = rivers, lakes = lakes,
                                  figsize = figsize, ax = ax,
                                  scatter_kwargs=scatter_kwargs, legend=legend,
                                  lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings,
                                  mute=mute)
        return res

    def getMetadata(self):
        """ Get the necessary metadata for the ensemble plots

        Parameters
        ----------
        timeseries : object
            a specific timeseries object.

        Returns
        -------
        res : dict
            A dictionary containing the following metadata:
            archiveType
            Authors (if more than 2, replace by et al)
            PublicationYear
            Publication
            DOI
            Variable Name
            Units
            Climate Interpretation
            Calibration Equation
            Calibration References
            Calibration Notes
        """
        # Get all the necessary information
        # Top level information
        if "archiveType" in self.lipd_ts.keys():
            archiveType = self.lipd_ts["archiveType"]
        else:
            archiveType = "NA"

        if "pub1_author" in self.lipd_ts.keys():
            authors = self.lipd_ts["pub1_author"]
        else:
            authors = "NA"

        #Truncate if more than two authors
        # NOTE(review): the docstring says "more than 2" but `len(idx)>2` counts
        # semicolons (so >3 authors) and keeps up to idx[1] (two authors) — confirm.
        idx = [pos for pos, char in enumerate(authors) if char == ";"]
        if len(idx)>2:
            authors = authors[0:idx[1]+1] + "et al."

        if "pub1_year" in self.lipd_ts.keys():
            Year = str(self.lipd_ts["pub1_year"])
        else:
            Year = "NA"

        if "pub1_doi" in self.lipd_ts.keys():
            DOI = self.lipd_ts["pub1_doi"]
        else:
            DOI = "NA"

        # keys are prefixed 'paleoData_...' or 'chronData_...' depending on mode
        if self.lipd_ts['mode'] == 'paleoData':
            prefix = 'paleo'
        else:
            prefix = 'chron'

        if prefix+"Data_InferredVariableType" in self.lipd_ts.keys():
            if type(self.lipd_ts[prefix+"Data_InferredVariableType"]) is list:
                Variable = self.lipd_ts[prefix+"Data_InferredVariableType"][0]
            else:
                Variable = self.lipd_ts[prefix+"Data_InferredVariableType"]
        elif prefix+"Data_ProxyObservationType" in self.lipd_ts.keys():
            if type(self.lipd_ts[prefix+"Data_ProxyObservationType"]) is list:
                Variable = self.lipd_ts[prefix+"Data_ProxyObservationType"][0]
            else:
                Variable = self.lipd_ts[prefix+"Data_ProxyObservationType"]
        else:
            Variable = self.lipd_ts[prefix+"Data_variableName"]

        if prefix+"Data_units" in self.lipd_ts.keys():
            units = self.lipd_ts[prefix+"Data_units"]
        else:
            units = "NA"

        #Climate interpretation information
        if prefix+"Data_interpretation" in self.lipd_ts.keys():
            interpretation = self.lipd_ts[prefix+"Data_interpretation"][0]
            if "name" in interpretation.keys():
                ClimateVar = interpretation["name"]
            elif "variable" in interpretation.keys():
                ClimateVar = interpretation["variable"]
            else:
                ClimateVar = "NA"
            if "detail" in interpretation.keys():
                Detail = interpretation["detail"]
            elif "variableDetail" in interpretation.keys():
                Detail = interpretation['variableDetail']
            else:
                Detail = "NA"
            if "scope" in interpretation.keys():
                Scope = interpretation['scope']
            else:
                Scope = "NA"
            if "seasonality" in interpretation.keys():
                Seasonality = interpretation["seasonality"]
            else:
                Seasonality = "NA"
            if "interpdirection" in interpretation.keys():
                Direction = interpretation["interpdirection"]
            else:
                Direction = "NA"
        else:
            ClimateVar = "NA"
            Detail = "NA"
            Scope = "NA"
            Seasonality = "NA"
            Direction = "NA"

        # Calibration information
        if prefix+"Data_calibration" in self.lipd_ts.keys():
            calibration = self.lipd_ts[prefix+'Data_calibration'][0]
            if "equation" in calibration.keys():
                Calibration_equation = calibration["equation"]
            else:
                Calibration_equation = "NA"
            if "calibrationReferences" in calibration.keys():
                ref = calibration["calibrationReferences"]
                if "author" in ref.keys():
                    ref_author = ref["author"][0] # get the first author
                else:
                    ref_author = "NA"
                if "publicationYear" in ref.keys():
                    ref_year = str(ref["publicationYear"])
                else:
                    ref_year="NA"
                Calibration_notes = ref_author +"."+ref_year
            elif "notes" in calibration.keys():
                Calibration_notes = calibration["notes"]
            else:
                Calibration_notes = "NA"
        else:
            Calibration_equation = "NA"
            Calibration_notes = "NA"

        #Truncate the notes if too long
        charlim = 30;
        if len(Calibration_notes)>charlim:
            Calibration_notes = Calibration_notes[0:charlim] + " ..."

        # assemble everything into the flat metadata dict consumed by the plots
        res = {"archiveType" : archiveType,
               "authors" : authors,
               "Year": Year,
               "DOI": DOI,
               "Variable": Variable,
               "units": units,
               "Climate_Variable" : ClimateVar,
               "Detail" : Detail,
               "Scope":Scope,
               "Seasonality" : Seasonality,
               "Interpretation_Direction" : Direction,
               "Calibration_equation" : Calibration_equation,
               "Calibration_notes" : Calibration_notes}

        return res

    def dashboard(self, figsize = [11,8], plt_kwargs=None, distplt_kwargs=None, spectral_kwargs=None,
                  spectralsignif_kwargs=None, spectralfig_kwargs=None, map_kwargs=None,
                  metadata = True, savefig_settings=None, mute=False, ensemble = False, D=None):
        '''
        Parameters
        ----------
        figsize : list, optional
            Figure size. The default is [11,8].

        plt_kwargs : dict, optional
            Optional arguments for the timeseries plot. See Series.plot() or
            EnsembleSeries.plot_envelope(). The default is None.

        distplt_kwargs : dict, optional
            Optional arguments for the distribution plot. See Series.distplot()
            or EnsembleSeries.plot_displot(). The default is None.

        spectral_kwargs : dict, optional
            Optional arguments for the spectral method. Default is to use
            Lomb-Scargle method. See Series.spectral() or EnsembleSeries.spectral().
            The default is None.
spectralsignif_kwargs : dict, optional Optional arguments to estimate the significance of the power spectrum. See PSD.signif_test. Note that we currently do not support significance testing for ensembles. The default is None. spectralfig_kwargs : dict, optional Optional arguments for the power spectrum figure. See PSD.plot() or MultiplePSD.plot_envelope(). The default is None. map_kwargs : dict, optional Optional arguments for the map. See LipdSeries.map(). The default is None. metadata : {True,False}, optional Whether or not to produce a dashboard with printed metadata. The default is True. savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"}. The default is None. mute : {True,False}, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax. The default is False. (going to be deprecated) ensemble : {True, False}, optional If True, will return the dashboard in ensemble modes if ensembles are available D : pyleoclim.Lipd If asking for an ensemble plot, a pyleoclim.Lipd object must be provided Returns ------- fig : matplotlib.figure The figure ax : matplolib.axis The axis. See also -------- pyleoclim.Series.plot : plot a timeseries pyleoclim.EnsembleSeries.plot_envelope: Envelope plots for an ensemble pyleoclim.Series.distplot : plot a distribution of the timeseries pyleoclim.EnsembleSeries.distplot : plot a distribution of the timeseries across ensembles pyleoclim.Series.spectral : spectral analysis method. pyleoclim.MultipleSeries.spectral : spectral analysis method for multiple series. 
pyleoclim.PSD.signif_test : significance test for timeseries analysis pyleoclim.PSD.plot : plot power spectrum pyleoclim.MulitplePSD.plot : plot envelope of power spectrum pyleoclim.LipdSeries.map : map location of dataset pyleolim.LipdSeries.getMetadata : get relevant metadata from the timeseries object pyleoclim.utils.mapping.map_all : Underlying mapping function for Pyleoclim Examples -------- .. ipython:: python :okwarning: :okexcept: import pyleoclim as pyleo url = 'http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=MD982176.Stott.2004' data = pyleo.Lipd(usr_path = url) ts = data.to_LipdSeries(number=5) @savefig ts_dashboard.png fig, ax = ts.dashboard() pyleo.closefig(fig) ''' if ensemble == True and D is None: raise ValueError("When an ensemble dashboard is requested, the corresponsind Lipd object must be supplied") if ensemble == True: warnings.warn('Some of the computation in ensemble mode can require a few minutes to complete.') savefig_settings = {} if savefig_settings is None else savefig_settings.copy() res=self.getMetadata() # start plotting fig = plt.figure(figsize=figsize) gs = gridspec.GridSpec(2,5) gs.update(left=0,right=1.1) if ensemble==True: ens = self.chronEnsembleToPaleo(D) ensc = ens.common_time() ax={} # Plot the timeseries plt_kwargs={} if plt_kwargs is None else plt_kwargs.copy() ax['ts'] = plt.subplot(gs[0,:-3]) plt_kwargs.update({'ax':ax['ts']}) # use the defaults if color/markers not specified if ensemble == False: if 'marker' not in plt_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") plt_kwargs.update({'marker':self.plot_default[archiveType][1]}) if 'color' not in plt_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") plt_kwargs.update({'color':self.plot_default[archiveType][0]}) ax['ts'] = self.plot(**plt_kwargs) elif ensemble == True: if 'curve_clr' not in plt_kwargs.keys(): archiveType = 
lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") plt_kwargs.update({'curve_clr':self.plot_default[archiveType][0]}) if 'shade_clr' not in plt_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") plt_kwargs.update({'shade_clr':self.plot_default[archiveType][0]}) #plt_kwargs.update({'ylabel':self.value_name}) ax['ts'] = ensc.plot_envelope(**plt_kwargs) else: raise ValueError("Invalid argument value for ensemble") ymin, ymax = ax['ts'].get_ylim() #plot the distplot distplt_kwargs={} if distplt_kwargs is None else distplt_kwargs.copy() ax['dts'] = plt.subplot(gs[0,2]) distplt_kwargs.update({'ax':ax['dts']}) distplt_kwargs.update({'ylabel':'PDF'}) distplt_kwargs.update({'vertical':True}) if 'color' not in distplt_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") distplt_kwargs.update({'color':self.plot_default[archiveType][0]}) if ensemble == False: ax['dts'] = self.distplot(**distplt_kwargs) elif ensemble == True: ax['dts'] = ensc.distplot(**distplt_kwargs) ax['dts'].set_ylim([ymin,ymax]) ax['dts'].set_yticklabels([]) ax['dts'].set_ylabel('') ax['dts'].set_yticks([]) #make the map - brute force since projection is not being returned properly lat=[self.lipd_ts['geo_meanLat']] lon=[self.lipd_ts['geo_meanLon']] map_kwargs={} if map_kwargs is None else map_kwargs.copy() if 'projection' in map_kwargs.keys(): projection=map_kwargs['projection'] else: projection='Orthographic' if 'proj_default' in map_kwargs.keys(): proj_default=map_kwargs['proj_default'] else: proj_default=True if proj_default==True: proj1={'central_latitude':lat[0], 'central_longitude':lon[0]} proj2={'central_latitude':lat[0]} proj3={'central_longitude':lon[0]} try: proj = mapping.set_proj(projection=projection, proj_default=proj1) except: try: proj = mapping.set_proj(projection=projection, proj_default=proj3) except: proj = mapping.set_proj(projection=projection, proj_default=proj2) if 'marker' 
in map_kwargs.keys(): marker = map_kwargs['marker'] else: marker = self.plot_default[archiveType][1] if 'color' in map_kwargs.keys(): color = map_kwargs['color'] else: color = self.plot_default[archiveType][0] if 'background' in map_kwargs.keys(): background = map_kwargs['background'] else: background = True if 'borders' in map_kwargs.keys(): borders= map_kwargs['borders'] else: borders = False if 'rivers' in map_kwargs.keys(): rivers= map_kwargs['rivers'] else: rivers = False if 'lakes' in map_kwargs.keys(): lakes = map_kwargs['lakes'] else: lakes = False if 'scatter_kwargs' in map_kwargs.keys(): scatter_kwargs = map_kwargs['scatter_kwargs'] else: scatter_kwargs={} if 'markersize' in map_kwargs.keys(): scatter_kwargs.update({'s': map_kwargs['markersize']}) else: pass if 'lgd_kwargs' in map_kwargs.keys(): lgd_kwargs = map_kwargs['lgd_kwargs'] else: lgd_kwargs ={} if 'legend' in map_kwargs.keys(): legend = map_kwargs['legend'] else: legend = False #make the plot map data_crs = ccrs.PlateCarree() ax['map'] = plt.subplot(gs[1,0],projection=proj) ax['map'].coastlines() if background is True: ax['map'].stock_img() #Other extra information if borders is True: ax['map'].add_feature(cfeature.BORDERS) if lakes is True: ax['map'].add_feature(cfeature.LAKES) if rivers is True: ax['map'].add_feature(cfeature.RIVERS) ax['map'].scatter(lon,lat,zorder=10,label=marker,facecolor=color,transform=data_crs, **scatter_kwargs) if legend == True: ax.legend(**lgd_kwargs) #spectral analysis spectral_kwargs={} if spectral_kwargs is None else spectral_kwargs.copy() if 'method' in spectral_kwargs.keys(): pass else: spectral_kwargs.update({'method':'lomb_scargle'}) if 'freq_method' in spectral_kwargs.keys(): pass else: if ensemble == False: spectral_kwargs.update({'freq_method':'lomb_scargle'}) elif ensemble == True: pass ax['spec'] = plt.subplot(gs[1,1:3]) spectralfig_kwargs={} if spectralfig_kwargs is None else spectralfig_kwargs.copy() spectralfig_kwargs.update({'ax':ax['spec']}) if 
ensemble == False: ts_preprocess = self.detrend().standardize() psd = ts_preprocess.spectral(**spectral_kwargs) #Significance test spectralsignif_kwargs={} if spectralsignif_kwargs is None else spectralsignif_kwargs.copy() psd_signif = psd.signif_test(**spectralsignif_kwargs) #plot if 'color' not in spectralfig_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") spectralfig_kwargs.update({'color':self.plot_default[archiveType][0]}) if 'signif_clr' not in spectralfig_kwargs.keys(): spectralfig_kwargs.update({'signif_clr':'grey'}) ax['spec'] = psd_signif.plot(**spectralfig_kwargs) elif ensemble == True: if 'curve_clr' not in spectralfig_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") spectralfig_kwargs.update({'curve_clr':self.plot_default[archiveType][0]}) if 'shade_clr' not in spectralfig_kwargs.keys(): archiveType = lipdutils.LipdToOntology(res['archiveType']).lower().replace(" ","") spectralfig_kwargs.update({'shade_clr':self.plot_default[archiveType][0]}) psd = ensc.detrend().standardize().spectral(**spectral_kwargs) #plot ax['spec'] = psd.plot_envelope(**spectralfig_kwargs) #Make the plot if metadata == True: # get metadata textstr = "archiveType: " + res["archiveType"]+"\n"+"\n"+\ "Authors: " + res["authors"]+"\n"+"\n"+\ "Year: " + res["Year"]+"\n"+"\n"+\ "DOI: " + res["DOI"]+"\n"+"\n"+\ "Variable: " + res["Variable"]+"\n"+"\n"+\ "units: " + res["units"]+"\n"+"\n"+\ "Climate Interpretation: " +"\n"+\ " Climate Variable: " + res["Climate_Variable"] +"\n"+\ " Detail: " + res["Detail"]+"\n"+\ " Seasonality: " + res["Seasonality"]+"\n"+\ " Direction: " + res["Interpretation_Direction"]+"\n \n"+\ "Calibration: \n" + \ " Equation: " + res["Calibration_equation"] + "\n" +\ " Notes: " + res["Calibration_notes"] plt.figtext(0.7, 0.4, textstr, fontsize = 12) if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # 
plotting.showfig(fig) return fig, ax def mapNearRecord(self, D, n=5, radius = None, sameArchive = False, projection='Orthographic',proj_default = True, background = True,borders = False, rivers = False, lakes = False, figsize = None, ax = None, marker_ref= None, color_ref=None, marker=None, color=None, markersize_adjust=False, scale_factor = 100, scatter_kwargs=None, legend = True, lgd_kwargs=None, savefig_settings=None, mute=False): """ Map records that are near the timeseries of interest Parameters ---------- D : pyleoclim.Lipd A pyleoclim LiPD object n : int, optional The n number of closest records. The default is 5. radius : float, optional The radius to take into consideration when looking for records (in km). The default is None. sameArchive : {True, False}, optional Whether to consider records from the same archiveType as the original record. The default is False. projection : string, optional A valid cartopy projection. The default is 'Orthographic'. proj_default : True or dict, optional The projection arguments. If not True, then use a dictionary to pass the appropriate arguments depending on the projection. The default is True. background : {True,False}, optional Whether to use a background. The default is True. borders : {True, False}, optional Whether to plot country borders. The default is False. rivers : {True, False}, optional Whether to plot rivers. The default is False. lakes : {True, False}, optional Whether to plot rivers. The default is False. figsize : list, optional the size of the figure. The default is None. ax : matplotlib.ax, optional The matplotlib axis onto which to return the map. The default is None. marker_ref : str, optional Marker shape to use for the main record. The default is None, which corresponds to the default marker for the archiveType color_ref : str, optional The color for the main record. The default is None, which corresponds to the default color for the archiveType. 
marker : str or list, optional Marker shape to use for the other records. The default is None, which corresponds to the marker shape for each archiveType. color : str or list, optional Color for each marker. The default is None, which corresponds to the color for each archiveType markersize_adjust : {True, False}, optional Whether to adjust the marker size according to distance from record of interest. The default is False. scale_factor : int, optional The maximum marker size. The default is 100. scatter_kwargs : dict, optional Parameters for the scatter plot. The default is None. legend : {True, False}, optional Whether to show the legend. The default is True. lgd_kwargs : dict, optional Parameters for the legend. The default is None. savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"}. The default is None. mute : {True, False}, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax. The default is False. 
(going to be deprecated) See also -------- pyleoclim.utils.mapping.map_all : Underlying mapping function for Pyleoclim pyleoclim.utils.mapping.dist_sphere: Calculate distance on a sphere pyleoclim.utils.mapping.compute_dist: Compute the distance between a point and an array pyleoclim.utils.mapping.within_distance: Returns point in an array within a certain distance Returns ------- res : dict contains fig and ax """ scatter_kwargs = {} if scatter_kwargs is None else scatter_kwargs.copy() #get the information about the original timeseries lat_ref=[self.lipd_ts['geo_meanLat']] lon_ref=[self.lipd_ts['geo_meanLon']] if 'archiveType' in self.lipd_ts.keys(): archiveType_ref=lipdutils.LipdToOntology(self.lipd_ts['archiveType']).lower().replace(" ","") else: archiveType_ref='other' # make sure criteria is in the plot_default list if archiveType_ref not in self.plot_default.keys(): archiveType_ref = 'other' # get information about the other timeseries lat=[] lon=[] archiveType=[] dataSetName_ref = self.lipd_ts['dataSetName'] for idx, key in enumerate(D.lipd): if key != dataSetName_ref: d = D.lipd[key] lat.append(d['geo']['geometry']['coordinates'][1]) lon.append(d['geo']['geometry']['coordinates'][0]) if 'archiveType' in d.keys(): archiveType.append(lipdutils.LipdToOntology(d['archiveType']).lower().replace(" ","")) else: archiveType.append('other') # make sure criteria is in the plot_default list for idx,val in enumerate(archiveType): if val not in self.plot_default.keys(): archiveType[idx] = 'other' if len(lat)==0: #this should not happen unless the coordinates are not available in the LiPD file raise ValueError('no matching record found') # Filter by the same type of archive if asked if sameArchive == True: idx_archive = [idx for idx,val in enumerate(archiveType) if val==archiveType_ref] if len(idx_archive)==0: raise ValueError('No records corresponding to the same archiveType available. 
Widen your search criteria.') else: lat = [lat[idx] for idx in idx_archive] lon = [lon[idx] for idx in idx_archive] archiveType=[archiveType[idx] for idx in idx_archive] #compute the distance dist = mapping.compute_dist(lat_ref,lon_ref,lat,lon) if radius: idx_radius = mapping.within_distance(dist, radius) if len(idx_radius) == 0: raise ValueError('No records withing matching radius distance. Widen your search criteria') else: lat = [lat[idx] for idx in idx_radius] lon = [lon[idx] for idx in idx_radius] archiveType = [archiveType[idx] for idx in idx_radius] dist = [dist[idx] for idx in idx_radius] #print a warning if plotting less than asked because of the filters if n>len(dist): warnings.warn("Number of matching records is less"+\ " than the number of neighbors chosen. Including all records "+\ " in the analysis.") n=len(dist) #Sort the distance array sort_idx = np.argsort(dist) dist = [dist[idx] for idx in sort_idx] lat = [lat[idx] for idx in sort_idx] lon = [lon[idx] for idx in sort_idx] archiveType = [archiveType[idx] for idx in sort_idx] # Grab the right number of records dist = dist[0:n] lat = lat[0:n] lon = lon[0:n] archiveType = archiveType[0:n] # Get plotting information if marker_ref == None: marker_ref = self.plot_default[archiveType_ref][1] if color_ref == None: color_ref = self.plot_default[archiveType_ref][0] if marker == None: marker=[] for item in archiveType: marker.append(self.plot_default[item][1]) elif type(marker) ==list: if len(marker)!=len(lon): raise ValueError('When providing a list, it should be the same length as the number of records') elif type(marker) == str: marker = [marker]*len(lon) if color == None: color=[] for item in archiveType: color.append(self.plot_default[item][0]) elif type(color) ==list: if len(color)!=len(lon): raise ValueError('When providing a list, it should be the same length as the number of records') elif type(color) == str: color = [color]*len(lon) if 'edgecolors' not in scatter_kwargs.keys(): edgecolors = [] for 
item in marker: edgecolors.append('w') edgecolors.append('k') scatter_kwargs.update({'edgecolors':edgecolors}) #Start plotting lat_all = lat + lat_ref lon_all = lon + lon_ref dist_all = dist + [0] archiveType_all = archiveType archiveType_all.append(archiveType_ref) color_all = color color_all.append(color_ref) marker_all= marker marker_all.append(marker_ref) if markersize_adjust == True: scale = dist_all[-1]/(scale_factor-30) s = list(np.array(dist_all)*1/(scale)+30) s.reverse() scatter_kwargs.update({'s':s}) proj1={'central_latitude':lat_ref[0], 'central_longitude':lon_ref[0]} proj2={'central_latitude':lat_ref[0]} proj3={'central_longitude':lon_ref[0]} if proj_default==True: try: res = mapping.map_all(lat=lat_all, lon=lon_all, criteria=archiveType_all, marker=marker_all, color =color_all, projection = projection, proj_default = proj1, background = background,borders = borders, rivers = rivers, lakes = lakes, figsize = figsize, ax = ax, scatter_kwargs=scatter_kwargs, legend=legend, lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings, mute=mute) except: try: res = mapping.map_all(lat=lat_all, lon=lon_all, criteria=archiveType_all, marker=marker_all, color =color_all, projection = projection, proj_default = proj2, background = background,borders = borders, rivers = rivers, lakes = lakes, figsize = figsize, ax = ax, scatter_kwargs=scatter_kwargs, legend=legend, lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings, mute=mute) except: res = mapping.map_all(lat=lat_all, lon=lon_all, criteria=archiveType_all, marker=marker_all, color =color_all, projection = projection, proj_default = proj3, background = background,borders = borders, rivers = rivers, lakes = lakes, figsize = figsize, ax = ax, scatter_kwargs=scatter_kwargs, legend=legend, lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings, mute=mute) else: res = mapping.map_all(lat=lat_all, lon=lon_all, criteria=archiveType_all, marker=marker_all, color =color_all, projection = projection, proj_default = 
proj_default, background = background,borders = borders, rivers = rivers, lakes = lakes, figsize = figsize, ax = ax, scatter_kwargs=scatter_kwargs, legend=legend, lgd_kwargs=lgd_kwargs,savefig_settings=savefig_settings, mute=mute) return res def plot_age_depth(self,figsize = [10,4], plt_kwargs=None, savefig_settings=None, mute=False, ensemble = False, D=None, num_traces = 10, ensemble_kwargs=None, envelope_kwargs = None, traces_kwargs = None): ''' Parameters ---------- figsize : List, optional Size of the figure. The default is [10,4]. plt_kwargs : dict, optional Arguments for basic plot. See Series.plot() for details. The default is None. savefig_settings : dict, optional the dictionary of arguments for plt.savefig(); some notes below: - "path" must be specified; it can be any existed or non-existed path, with or without a suffix; if the suffix is not given in "path", it will follow "format" - "format" can be one of {"pdf", "eps", "png", "ps"}. The default is None. mute : {True,False}, optional if True, the plot will not show; recommend to turn on when more modifications are going to be made on ax. The default is False. (going to be deprecated) ensemble : {True,False}, optional Whether to use age model ensembles stored in the file for the plot. The default is False. If no ensemble can be found, will error out. D : pyleoclim.Lipd, optional The pyleoclim.Lipd object from which the pyleoclim.LipdSeries is derived. The default is None. num_traces : int, optional Number of individual age models to plot. If not interested in plotting individual traces, set this parameter to 0 or None. The default is 10. ensemble_kwargs : dict, optional Parameters associated with identifying the chronEnsemble tables. See pyleoclim.LipdSeries.chronEnsembleToPaleo() for details. The default is None. envelope_kwargs : dict, optional Parameters to control the envelope plot. See pyleoclim.EnsembleSeries.plot_envelope() for details. The default is None. 
traces_kwargs : TYPE, optional Parameters to control the traces plot. See pyleoclim.EnsembleSeries.plot_traces() for details. The default is None. Raises ------ ValueError In ensemble mode, make sure that the LiPD object is given KeyError Depth information needed. Returns ------- fig,ax The figure See also -------- pyleoclim.core.ui.Lipd : Pyleoclim internal representation of a LiPD file pyleoclim.core.ui.Series.plot : Basic plotting in pyleoclim pyleoclim.core.ui.LipdSeries.chronEnsembleToPaleo : Function to map the ensemble table to a paleo depth. pyleoclim.core.ui.EnsembleSeries.plot_envelope : Create an envelope plot from an ensemble pyleoclim.core.ui.EnsembleSeries.plot_traces : Create a trace plot from an ensemble Examples -------- .. ipython:: python :okwarning: :okexcept: D = pyleo.Lipd('http://wiki.linked.earth/wiki/index.php/Special:WTLiPD?op=export&lipdid=Crystal.McCabe-Glynn.2013') ts=D.to_LipdSeries(number=2) ts.plot_age_depth() pyleo.closefig(fig) ''' if ensemble == True and D is None: raise ValueError("When an ensemble is requested, the corresponsind Lipd object must be supplied") meta=self.getMetadata() savefig_settings = {} if savefig_settings is None else savefig_settings.copy() plt_kwargs={} if plt_kwargs is None else plt_kwargs.copy() # get depth try: value_depth, label_depth = lipdutils.checkXaxis(self.lipd_ts,'depth') if 'depthUnits' in self.lipd_ts.keys(): units_depth = self.lipd_ts['depthUnits'] else: units_depth = 'NA' except: raise KeyError('No depth available in this record') # create a series for which time is actually depth if ensemble == False: ts = Series(time = self.time,value=value_depth, time_name=self.time_name,time_unit=self.time_unit, value_name=label_depth,value_unit=units_depth) plt_kwargs={} if plt_kwargs is None else plt_kwargs.copy() if 'marker' not in plt_kwargs.keys(): archiveType = lipdutils.LipdToOntology(meta['archiveType']).lower().replace(" ","") plt_kwargs.update({'marker':self.plot_default[archiveType][1]}) if 
'color' not in plt_kwargs.keys(): archiveType = lipdutils.LipdToOntology(meta['archiveType']).lower().replace(" ","") plt_kwargs.update({'color':self.plot_default[archiveType][0]}) fig,ax = ts.plot(**plt_kwargs) elif ensemble == True: ensemble_kwargs = {} if ensemble_kwargs is None else ensemble_kwargs.copy() ens = self.chronEnsembleToPaleo(D,**ensemble_kwargs) # NOT A VERY ELEGANT SOLUTION: replace depth in the dictionary for item in ens.__dict__['series_list']: item.__dict__['value'] = value_depth item.__dict__['value_unit']=units_depth item.__dict__['value_name']='depth' envelope_kwargs={} if envelope_kwargs is None else envelope_kwargs.copy() if 'curve_clr'not in envelope_kwargs.keys(): archiveType = lipdutils.LipdToOntology(meta['archiveType']).lower().replace(" ","") envelope_kwargs.update({'curve_clr':self.plot_default[archiveType][0]}) if 'shade_clr'not in envelope_kwargs.keys(): archiveType = lipdutils.LipdToOntology(meta['archiveType']).lower().replace(" ","") envelope_kwargs.update({'shade_clr':self.plot_default[archiveType][0]}) ens2=ens.common_time() if num_traces > 0: envelope_kwargs.update({'mute':True}) fig,ax = ens2.plot_envelope(**envelope_kwargs) traces_kwargs={} if traces_kwargs is None else traces_kwargs.copy() if 'color' not in traces_kwargs.keys(): archiveType = lipdutils.LipdToOntology(meta['archiveType']).lower().replace(" ","") traces_kwargs.update({'color':self.plot_default[archiveType][0]}) if 'linestyle' not in traces_kwargs.keys(): traces_kwargs.update({'linestyle':'dashed'}) traces_kwargs.update({'ax':ax,'num_traces':num_traces}) ens2.plot_traces(**traces_kwargs) else: fig,ax=ens2.plot_envelope(**envelope_kwargs) if 'fig' in locals(): if 'path' in savefig_settings: plotting.savefig(fig, settings=savefig_settings) # else: # if not mute: # plotting.showfig(fig) return fig, ax else: return ax
LinkedEarth/Pyleoclim_util
pyleoclim/core/ui.py
Python
gpl-3.0
339,387
[ "CRYSTAL", "Gaussian", "TINKER" ]
3b15d6bb98a8b096f6c280019d70ecb32d7a408fb60b179ccf00f1c6e11ddcac
"""Support the ElkM1 Gold and ElkM1 EZ8 alarm/integration panels.""" import logging import re import voluptuous as vol from homeassistant.const import ( CONF_EXCLUDE, CONF_HOST, CONF_INCLUDE, CONF_PASSWORD, CONF_TEMPERATURE_UNIT, CONF_USERNAME) from homeassistant.core import HomeAssistant, callback # noqa from homeassistant.helpers import config_validation as cv from homeassistant.helpers import discovery from homeassistant.helpers.entity import Entity from homeassistant.helpers.typing import ConfigType # noqa REQUIREMENTS = ['elkm1-lib==0.7.13'] DOMAIN = 'elkm1' CONF_AREA = 'area' CONF_COUNTER = 'counter' CONF_ENABLED = 'enabled' CONF_KEYPAD = 'keypad' CONF_OUTPUT = 'output' CONF_PLC = 'plc' CONF_SETTING = 'setting' CONF_TASK = 'task' CONF_THERMOSTAT = 'thermostat' CONF_ZONE = 'zone' _LOGGER = logging.getLogger(__name__) SUPPORTED_DOMAINS = ['alarm_control_panel', 'climate', 'light', 'scene', 'sensor', 'switch'] SPEAK_SERVICE_SCHEMA = vol.Schema({ vol.Required('number'): vol.All(vol.Coerce(int), vol.Range(min=0, max=999)) }) def _host_validator(config): """Validate that a host is properly configured.""" if config[CONF_HOST].startswith('elks://'): if CONF_USERNAME not in config or CONF_PASSWORD not in config: raise vol.Invalid("Specify username and password for elks://") elif not config[CONF_HOST].startswith('elk://') and not config[ CONF_HOST].startswith('serial://'): raise vol.Invalid("Invalid host URL") return config def _elk_range_validator(rng): def _housecode_to_int(val): match = re.search(r'^([a-p])(0[1-9]|1[0-6]|[1-9])$', val.lower()) if match: return (ord(match.group(1)) - ord('a')) * 16 + int(match.group(2)) raise vol.Invalid("Invalid range") def _elk_value(val): return int(val) if val.isdigit() else _housecode_to_int(val) vals = [s.strip() for s in str(rng).split('-')] start = _elk_value(vals[0]) end = start if len(vals) == 1 else _elk_value(vals[1]) return (start, end) CONFIG_SCHEMA_SUBDOMAIN = vol.Schema({ vol.Optional(CONF_ENABLED, default=True): 
cv.boolean, vol.Optional(CONF_INCLUDE, default=[]): [_elk_range_validator], vol.Optional(CONF_EXCLUDE, default=[]): [_elk_range_validator], }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_USERNAME, default=''): cv.string, vol.Optional(CONF_PASSWORD, default=''): cv.string, vol.Optional(CONF_TEMPERATURE_UNIT, default='F'): cv.temperature_unit, vol.Optional(CONF_AREA, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_COUNTER, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_KEYPAD, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_OUTPUT, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_PLC, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_SETTING, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_TASK, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_THERMOSTAT, default={}): CONFIG_SCHEMA_SUBDOMAIN, vol.Optional(CONF_ZONE, default={}): CONFIG_SCHEMA_SUBDOMAIN, }, _host_validator, ) }, extra=vol.ALLOW_EXTRA) async def async_setup(hass: HomeAssistant, hass_config: ConfigType) -> bool: """Set up the Elk M1 platform.""" from elkm1_lib.const import Max import elkm1_lib as elkm1 configs = { CONF_AREA: Max.AREAS.value, CONF_COUNTER: Max.COUNTERS.value, CONF_KEYPAD: Max.KEYPADS.value, CONF_OUTPUT: Max.OUTPUTS.value, CONF_PLC: Max.LIGHTS.value, CONF_SETTING: Max.SETTINGS.value, CONF_TASK: Max.TASKS.value, CONF_THERMOSTAT: Max.THERMOSTATS.value, CONF_ZONE: Max.ZONES.value, } def _included(ranges, set_to, values): for rng in ranges: if not rng[0] <= rng[1] <= len(values): raise vol.Invalid("Invalid range {}".format(rng)) values[rng[0]-1:rng[1]] = [set_to] * (rng[1] - rng[0] + 1) conf = hass_config[DOMAIN] config = {'temperature_unit': conf[CONF_TEMPERATURE_UNIT]} config['panel'] = {'enabled': True, 'included': [True]} for item, max_ in configs.items(): config[item] = {'enabled': conf[item][CONF_ENABLED], 'included': [not conf[item]['include']] * max_} try: 
_included(conf[item]['include'], True, config[item]['included']) _included(conf[item]['exclude'], False, config[item]['included']) except (ValueError, vol.Invalid) as err: _LOGGER.error("Config item: %s; %s", item, err) return False elk = elkm1.Elk({'url': conf[CONF_HOST], 'userid': conf[CONF_USERNAME], 'password': conf[CONF_PASSWORD]}) elk.connect() _create_elk_services(hass, elk) hass.data[DOMAIN] = {'elk': elk, 'config': config, 'keypads': {}} for component in SUPPORTED_DOMAINS: hass.async_create_task( discovery.async_load_platform(hass, component, DOMAIN, {}, hass_config)) return True def _create_elk_services(hass, elk): def _speak_word_service(service): elk.panel.speak_word(service.data.get('number')) def _speak_phrase_service(service): elk.panel.speak_phrase(service.data.get('number')) hass.services.async_register( DOMAIN, 'speak_word', _speak_word_service, SPEAK_SERVICE_SCHEMA) hass.services.async_register( DOMAIN, 'speak_phrase', _speak_phrase_service, SPEAK_SERVICE_SCHEMA) def create_elk_entities(hass, elk_elements, element_type, class_, entities): """Create the ElkM1 devices of a particular class.""" elk_data = hass.data[DOMAIN] if elk_data['config'][element_type]['enabled']: elk = elk_data['elk'] for element in elk_elements: if elk_data['config'][element_type]['included'][element.index]: entities.append(class_(element, elk, elk_data)) return entities class ElkEntity(Entity): """Base class for all Elk entities.""" def __init__(self, element, elk, elk_data): """Initialize the base of all Elk devices.""" self._elk = elk self._element = element self._temperature_unit = elk_data['config']['temperature_unit'] self._unique_id = 'elkm1_{}'.format( self._element.default_name('_').lower()) @property def name(self): """Name of the element.""" return self._element.name @property def unique_id(self): """Return unique id of the element.""" return self._unique_id @property def should_poll(self) -> bool: """Don't poll this device.""" return False @property def 
device_state_attributes(self): """Return the default attributes of the element.""" return {**self._element.as_dict(), **self.initial_attrs()} @property def available(self): """Is the entity available to be updated.""" return self._elk.is_connected() def initial_attrs(self): """Return the underlying element's attributes as a dict.""" attrs = {} attrs['index'] = self._element.index + 1 return attrs def _element_changed(self, element, changeset): pass @callback def _element_callback(self, element, changeset): """Handle callback from an Elk element that has changed.""" self._element_changed(element, changeset) self.async_schedule_update_ha_state(True) async def async_added_to_hass(self): """Register callback for ElkM1 changes and update entity state.""" self._element.add_callback(self._element_callback) self._element_callback(self._element, {})
jamespcole/home-assistant
homeassistant/components/elkm1/__init__.py
Python
apache-2.0
8,007
[ "Elk" ]
3ddf12470f3aef6dd752a0e6d331a70cbcf194234d7730e6c251870fe91b7261
import imp import os import marshal import struct import sys from cStringIO import StringIO is_jython = sys.platform.startswith('java') from compiler import ast, parse, walk, syntax from compiler import misc, future, symbols from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICT, \ SC_FREE, SC_CELL from compiler.consts import (CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS, CO_NESTED, CO_GENERATOR, CO_FUTURE_DIVISION, CO_FUTURE_ABSIMPORT, CO_FUTURE_WITH_STATEMENT, CO_FUTURE_PRINT_FUNCTION) if not is_jython: from compiler.pyassem import TupleArg else: TupleArg = None # XXX The version-specific code can go, since this code only works with 2.x. # Do we have Python 1.x or Python 2.x? try: VERSION = sys.version_info[0] except AttributeError: VERSION = 1 callfunc_opcode_info = { # (Have *args, Have **args) : opcode (0,0) : "CALL_FUNCTION", (1,0) : "CALL_FUNCTION_VAR", (0,1) : "CALL_FUNCTION_KW", (1,1) : "CALL_FUNCTION_VAR_KW", } LOOP = 1 EXCEPT = 2 TRY_FINALLY = 3 END_FINALLY = 4 def compileFile(filename, display=0): f = open(filename, 'U') buf = f.read() f.close() mod = Module(buf, filename) try: mod.compile(display) except SyntaxError: raise else: f = open(filename + "c", "wb") mod.dump(f) f.close() if is_jython: # use __builtin__ compile compile = compile else: def compile(source, filename, mode, flags=None, dont_inherit=None): """Replacement for builtin compile() function""" if flags is not None or dont_inherit is not None: raise RuntimeError, "not implemented yet" if mode == "single": gen = Interactive(source, filename) elif mode == "exec": gen = Module(source, filename) elif mode == "eval": gen = Expression(source, filename) else: raise ValueError("compile() 3rd arg must be 'exec' or " "'eval' or 'single'") gen.compile() return gen.code class AbstractCompileMode: mode = None # defined by subclass def __init__(self, source, filename): self.source = source self.filename = filename self.code = None def _get_tree(self): tree = parse(self.source, 
self.mode) misc.set_filename(self.filename, tree) syntax.check(tree) return tree def compile(self): pass # implemented by subclass def getCode(self): return self.code class Expression(AbstractCompileMode): mode = "eval" def compile(self): tree = self._get_tree() gen = ExpressionCodeGenerator(tree) self.code = gen.getCode() class Interactive(AbstractCompileMode): mode = "single" def compile(self): tree = self._get_tree() gen = InteractiveCodeGenerator(tree) self.code = gen.getCode() class Module(AbstractCompileMode): mode = "exec" def compile(self, display=0): tree = self._get_tree() gen = ModuleCodeGenerator(tree) if display: import pprint print pprint.pprint(tree) self.code = gen.getCode() def dump(self, f): f.write(self.getPycHeader()) marshal.dump(self.code, f) MAGIC = None if is_jython else imp.get_magic() def getPycHeader(self): # compile.c uses marshal to write a long directly, with # calling the interface that would also generate a 1-byte code # to indicate the type of the value. simplest way to get the # same effect is to call marshal and then skip the code. 
mtime = os.path.getmtime(self.filename) mtime = struct.pack('<i', mtime) return self.MAGIC + mtime class LocalNameFinder: """Find local names in scope""" def __init__(self, names=()): self.names = misc.Set() self.globals = misc.Set() for name in names: self.names.add(name) # XXX list comprehensions and for loops def getLocals(self): for elt in self.globals.elements(): if self.names.has_elt(elt): self.names.remove(elt) return self.names def visitDict(self, node): pass def visitGlobal(self, node): for name in node.names: self.globals.add(name) def visitFunction(self, node): self.names.add(node.name) def visitLambda(self, node): pass def visitImport(self, node): for name, alias in node.names: self.names.add(alias or name) def visitFrom(self, node): for name, alias in node.names: self.names.add(alias or name) def visitClass(self, node): self.names.add(node.name) def visitAssName(self, node): self.names.add(node.name) def is_constant_false(node): if isinstance(node, ast.Const): if not node.value: return 1 return 0 class CodeGenerator: """Defines basic code generator for Python bytecode This class is an abstract base class. Concrete subclasses must define an __init__() that defines self.graph and then calls the __init__() defined in this class. The concrete class must also define the class attributes NameFinder, FunctionGen, and ClassGen. These attributes can be defined in the initClass() method, which is a hook for initializing these methods after all the classes have been defined. """ optimized = 0 # is namespace access optimized? 
__initialized = None class_name = None # provide default for instance variable def __init__(self): if self.__initialized is None: self.initClass() self.__class__.__initialized = 1 self.checkClass() self.locals = misc.Stack() self.setups = misc.Stack() self.last_lineno = None self._setupGraphDelegation() self._div_op = "BINARY_DIVIDE" # XXX set flags based on future features futures = self.get_module().futures for feature in futures: if feature == "division": self.graph.setFlag(CO_FUTURE_DIVISION) self._div_op = "BINARY_TRUE_DIVIDE" elif feature == "absolute_import": self.graph.setFlag(CO_FUTURE_ABSIMPORT) elif feature == "with_statement": self.graph.setFlag(CO_FUTURE_WITH_STATEMENT) elif feature == "print_function": self.graph.setFlag(CO_FUTURE_PRINT_FUNCTION) def initClass(self): """This method is called once for each class""" def checkClass(self): """Verify that class is constructed correctly""" try: assert hasattr(self, 'graph') assert getattr(self, 'NameFinder') assert getattr(self, 'FunctionGen') assert getattr(self, 'ClassGen') except AssertionError, msg: intro = "Bad class construction for %s" % self.__class__.__name__ raise AssertionError, intro def _setupGraphDelegation(self): self.emit = self.graph.emit self.newBlock = self.graph.newBlock self.startBlock = self.graph.startBlock self.nextBlock = self.graph.nextBlock self.setDocstring = self.graph.setDocstring def getCode(self): """Return a code object""" return self.graph.getCode() def mangle(self, name): if self.class_name is not None: return misc.mangle(name, self.class_name) else: return name def parseSymbols(self, tree): s = symbols.SymbolVisitor() walk(tree, s) return s.scopes def get_module(self): raise RuntimeError, "should be implemented by subclasses" # Next five methods handle name access def isLocalName(self, name): return self.locals.top().has_elt(name) def storeName(self, name): self._nameOp('STORE', name) def loadName(self, name): self._nameOp('LOAD', name) def delName(self, name): 
self._nameOp('DELETE', name) def _nameOp(self, prefix, name): name = self.mangle(name) scope = self.scope.check_name(name) if scope == SC_LOCAL: if not self.optimized: self.emit(prefix + '_NAME', name) else: self.emit(prefix + '_FAST', name) elif scope == SC_GLOBAL_EXPLICT: self.emit(prefix + '_GLOBAL', name) elif scope == SC_GLOBAL_IMPLICIT: if not self.optimized: self.emit(prefix + '_NAME', name) else: self.emit(prefix + '_GLOBAL', name) elif scope == SC_FREE or scope == SC_CELL: self.emit(prefix + '_DEREF', name) else: raise RuntimeError, "unsupported scope for var %s: %d" % \ (name, scope) def _implicitNameOp(self, prefix, name): """Emit name ops for names generated implicitly by for loops The interpreter generates names that start with a period or dollar sign. The symbol table ignores these names because they aren't present in the program text. """ if self.optimized: self.emit(prefix + '_FAST', name) else: self.emit(prefix + '_NAME', name) # The set_lineno() function and the explicit emit() calls for # SET_LINENO below are only used to generate the line number table. # As of Python 2.3, the interpreter does not have a SET_LINENO # instruction. pyassem treats SET_LINENO opcodes as a special case. def set_lineno(self, node, force=False): """Emit SET_LINENO if necessary. The instruction is considered necessary if the node has a lineno attribute and it is different than the last lineno emitted. Returns true if SET_LINENO was emitted. There are no rules for when an AST node should have a lineno attribute. The transformer and AST code need to be reviewed and a consistent policy implemented and documented. Until then, this method works around missing line numbers. """ lineno = getattr(node, 'lineno', None) if lineno is not None and (lineno != self.last_lineno or force): self.emit('SET_LINENO', lineno) self.last_lineno = lineno return True return False # The first few visitor methods handle nodes that generator new # code objects. 
They use class attributes to determine what # specialized code generators to use. NameFinder = LocalNameFinder FunctionGen = None ClassGen = None def visitModule(self, node): self.scopes = self.parseSymbols(node) self.scope = self.scopes[node] self.emit('SET_LINENO', 0) if node.doc: self.emit('LOAD_CONST', node.doc) self.storeName('__doc__') lnf = walk(node.node, self.NameFinder(), verbose=0) self.locals.push(lnf.getLocals()) self.visit(node.node) self.emit('LOAD_CONST', None) self.emit('RETURN_VALUE') def visitExpression(self, node): self.set_lineno(node) self.scopes = self.parseSymbols(node) self.scope = self.scopes[node] self.visit(node.node) self.emit('RETURN_VALUE') def visitFunction(self, node): self._visitFuncOrLambda(node, isLambda=0) if node.doc: self.setDocstring(node.doc) self.storeName(node.name) def visitLambda(self, node): self._visitFuncOrLambda(node, isLambda=1) def _visitFuncOrLambda(self, node, isLambda=0): if not isLambda and node.decorators: for decorator in node.decorators.nodes: self.visit(decorator) ndecorators = len(node.decorators.nodes) else: ndecorators = 0 gen = self.FunctionGen(node, self.scopes, isLambda, self.class_name, self.get_module()) walk(node.code, gen) gen.finish() self.set_lineno(node) for default in node.defaults: self.visit(default) self._makeClosure(gen, len(node.defaults)) for i in range(ndecorators): self.emit('CALL_FUNCTION', 1) def visitClass(self, node): gen = self.ClassGen(node, self.scopes, self.get_module()) walk(node.code, gen) gen.finish() self.set_lineno(node) self.emit('LOAD_CONST', node.name) for base in node.bases: self.visit(base) self.emit('BUILD_TUPLE', len(node.bases)) self._makeClosure(gen, 0) self.emit('CALL_FUNCTION', 0) self.emit('BUILD_CLASS') self.storeName(node.name) # The rest are standard visitor methods # The next few implement control-flow statements def visitIf(self, node): end = self.newBlock() numtests = len(node.tests) for i in range(numtests): test, suite = node.tests[i] if 
is_constant_false(test): # XXX will need to check generator stuff here continue self.set_lineno(test) self.visit(test) nextTest = self.newBlock() self.emit('JUMP_IF_FALSE', nextTest) self.nextBlock() self.emit('POP_TOP') self.visit(suite) self.emit('JUMP_FORWARD', end) self.startBlock(nextTest) self.emit('POP_TOP') if node.else_: self.visit(node.else_) self.nextBlock(end) def visitWhile(self, node): self.set_lineno(node) loop = self.newBlock() else_ = self.newBlock() after = self.newBlock() self.emit('SETUP_LOOP', after) self.nextBlock(loop) self.setups.push((LOOP, loop)) self.set_lineno(node, force=True) self.visit(node.test) self.emit('JUMP_IF_FALSE', else_ or after) self.nextBlock() self.emit('POP_TOP') self.visit(node.body) self.emit('JUMP_ABSOLUTE', loop) self.startBlock(else_) # or just the POPs if not else clause self.emit('POP_TOP') self.emit('POP_BLOCK') self.setups.pop() if node.else_: self.visit(node.else_) self.nextBlock(after) def visitFor(self, node): start = self.newBlock() anchor = self.newBlock() after = self.newBlock() self.setups.push((LOOP, start)) self.set_lineno(node) self.emit('SETUP_LOOP', after) self.visit(node.list) self.emit('GET_ITER') self.nextBlock(start) self.set_lineno(node, force=1) self.emit('FOR_ITER', anchor) self.visit(node.assign) self.visit(node.body) self.emit('JUMP_ABSOLUTE', start) self.nextBlock(anchor) self.emit('POP_BLOCK') self.setups.pop() if node.else_: self.visit(node.else_) self.nextBlock(after) def visitBreak(self, node): if not self.setups: raise SyntaxError, "'break' outside loop (%s, %d)" % \ (node.filename, node.lineno) self.set_lineno(node) self.emit('BREAK_LOOP') def visitContinue(self, node): if not self.setups: raise SyntaxError, "'continue' outside loop (%s, %d)" % \ (node.filename, node.lineno) kind, block = self.setups.top() if kind == LOOP: self.set_lineno(node) self.emit('JUMP_ABSOLUTE', block) self.nextBlock() elif kind == EXCEPT or kind == TRY_FINALLY: self.set_lineno(node) # find the block that 
starts the loop top = len(self.setups) while top > 0: top = top - 1 kind, loop_block = self.setups[top] if kind == LOOP: break if kind != LOOP: raise SyntaxError, "'continue' outside loop (%s, %d)" % \ (node.filename, node.lineno) self.emit('CONTINUE_LOOP', loop_block) self.nextBlock() elif kind == END_FINALLY: msg = "'continue' not allowed inside 'finally' clause (%s, %d)" raise SyntaxError, msg % (node.filename, node.lineno) def visitTest(self, node, jump): end = self.newBlock() for child in node.nodes[:-1]: self.visit(child) self.emit(jump, end) self.nextBlock() self.emit('POP_TOP') self.visit(node.nodes[-1]) self.nextBlock(end) def visitAnd(self, node): self.visitTest(node, 'JUMP_IF_FALSE') def visitOr(self, node): self.visitTest(node, 'JUMP_IF_TRUE') def visitIfExp(self, node): endblock = self.newBlock() elseblock = self.newBlock() self.visit(node.test) self.emit('JUMP_IF_FALSE', elseblock) self.emit('POP_TOP') self.visit(node.then) self.emit('JUMP_FORWARD', endblock) self.nextBlock(elseblock) self.emit('POP_TOP') self.visit(node.else_) self.nextBlock(endblock) def visitCompare(self, node): self.visit(node.expr) cleanup = self.newBlock() for op, code in node.ops[:-1]: self.visit(code) self.emit('DUP_TOP') self.emit('ROT_THREE') self.emit('COMPARE_OP', op) self.emit('JUMP_IF_FALSE', cleanup) self.nextBlock() self.emit('POP_TOP') # now do the last comparison if node.ops: op, code = node.ops[-1] self.visit(code) self.emit('COMPARE_OP', op) if len(node.ops) > 1: end = self.newBlock() self.emit('JUMP_FORWARD', end) self.startBlock(cleanup) self.emit('ROT_TWO') self.emit('POP_TOP') self.nextBlock(end) # list comprehensions __list_count = 0 def visitListComp(self, node): self.set_lineno(node) # setup list append = "$append%d" % self.__list_count self.__list_count = self.__list_count + 1 self.emit('BUILD_LIST', 0) self.emit('DUP_TOP') self.emit('LOAD_ATTR', 'append') self._implicitNameOp('STORE', append) stack = [] for i, for_ in zip(range(len(node.quals)), 
node.quals): start, anchor = self.visit(for_) cont = None for if_ in for_.ifs: if cont is None: cont = self.newBlock() self.visit(if_, cont) stack.insert(0, (start, cont, anchor)) self._implicitNameOp('LOAD', append) self.visit(node.expr) self.emit('CALL_FUNCTION', 1) self.emit('POP_TOP') for start, cont, anchor in stack: if cont: skip_one = self.newBlock() self.emit('JUMP_FORWARD', skip_one) self.startBlock(cont) self.emit('POP_TOP') self.nextBlock(skip_one) self.emit('JUMP_ABSOLUTE', start) self.startBlock(anchor) self._implicitNameOp('DELETE', append) self.__list_count = self.__list_count - 1 def visitListCompFor(self, node): start = self.newBlock() anchor = self.newBlock() self.visit(node.list) self.emit('GET_ITER') self.nextBlock(start) self.set_lineno(node, force=True) self.emit('FOR_ITER', anchor) self.nextBlock() self.visit(node.assign) return start, anchor def visitListCompIf(self, node, branch): self.set_lineno(node, force=True) self.visit(node.test) self.emit('JUMP_IF_FALSE', branch) self.newBlock() self.emit('POP_TOP') def _makeClosure(self, gen, args): frees = gen.scope.get_free_vars() if frees: for name in frees: self.emit('LOAD_CLOSURE', name) self.emit('BUILD_TUPLE', len(frees)) self.emit('LOAD_CONST', gen) self.emit('MAKE_CLOSURE', args) else: self.emit('LOAD_CONST', gen) self.emit('MAKE_FUNCTION', args) def visitGenExpr(self, node): gen = GenExprCodeGenerator(node, self.scopes, self.class_name, self.get_module()) walk(node.code, gen) gen.finish() self.set_lineno(node) self._makeClosure(gen, 0) # precomputation of outmost iterable self.visit(node.code.quals[0].iter) self.emit('GET_ITER') self.emit('CALL_FUNCTION', 1) def visitGenExprInner(self, node): self.set_lineno(node) # setup list stack = [] for i, for_ in zip(range(len(node.quals)), node.quals): start, anchor, end = self.visit(for_) cont = None for if_ in for_.ifs: if cont is None: cont = self.newBlock() self.visit(if_, cont) stack.insert(0, (start, cont, anchor, end)) self.visit(node.expr) 
self.emit('YIELD_VALUE') self.emit('POP_TOP') for start, cont, anchor, end in stack: if cont: skip_one = self.newBlock() self.emit('JUMP_FORWARD', skip_one) self.startBlock(cont) self.emit('POP_TOP') self.nextBlock(skip_one) self.emit('JUMP_ABSOLUTE', start) self.startBlock(anchor) self.emit('POP_BLOCK') self.setups.pop() self.startBlock(end) self.emit('LOAD_CONST', None) def visitGenExprFor(self, node): start = self.newBlock() anchor = self.newBlock() end = self.newBlock() self.setups.push((LOOP, start)) self.emit('SETUP_LOOP', end) if node.is_outmost: self.loadName('.0') else: self.visit(node.iter) self.emit('GET_ITER') self.nextBlock(start) self.set_lineno(node, force=True) self.emit('FOR_ITER', anchor) self.nextBlock() self.visit(node.assign) return start, anchor, end def visitGenExprIf(self, node, branch): self.set_lineno(node, force=True) self.visit(node.test) self.emit('JUMP_IF_FALSE', branch) self.newBlock() self.emit('POP_TOP') # exception related def visitAssert(self, node): # XXX would be interesting to implement this via a # transformation of the AST before this stage if __debug__: end = self.newBlock() self.set_lineno(node) # XXX AssertionError appears to be special case -- it is always # loaded as a global even if there is a local name. I guess this # is a sort of renaming op. 
self.nextBlock() self.visit(node.test) self.emit('JUMP_IF_TRUE', end) self.nextBlock() self.emit('POP_TOP') self.emit('LOAD_GLOBAL', 'AssertionError') if node.fail: self.visit(node.fail) self.emit('RAISE_VARARGS', 2) else: self.emit('RAISE_VARARGS', 1) self.nextBlock(end) self.emit('POP_TOP') def visitRaise(self, node): self.set_lineno(node) n = 0 if node.expr1: self.visit(node.expr1) n = n + 1 if node.expr2: self.visit(node.expr2) n = n + 1 if node.expr3: self.visit(node.expr3) n = n + 1 self.emit('RAISE_VARARGS', n) def visitTryExcept(self, node): body = self.newBlock() handlers = self.newBlock() end = self.newBlock() if node.else_: lElse = self.newBlock() else: lElse = end self.set_lineno(node) self.emit('SETUP_EXCEPT', handlers) self.nextBlock(body) self.setups.push((EXCEPT, body)) self.visit(node.body) self.emit('POP_BLOCK') self.setups.pop() self.emit('JUMP_FORWARD', lElse) self.startBlock(handlers) last = len(node.handlers) - 1 for i in range(len(node.handlers)): expr, target, body = node.handlers[i] self.set_lineno(expr) if expr: self.emit('DUP_TOP') self.visit(expr) self.emit('COMPARE_OP', 'exception match') next = self.newBlock() self.emit('JUMP_IF_FALSE', next) self.nextBlock() self.emit('POP_TOP') self.emit('POP_TOP') if target: self.visit(target) else: self.emit('POP_TOP') self.emit('POP_TOP') self.visit(body) self.emit('JUMP_FORWARD', end) if expr: self.nextBlock(next) else: self.nextBlock() if expr: # XXX self.emit('POP_TOP') self.emit('END_FINALLY') if node.else_: self.nextBlock(lElse) self.visit(node.else_) self.nextBlock(end) def visitTryFinally(self, node): body = self.newBlock() final = self.newBlock() self.set_lineno(node) self.emit('SETUP_FINALLY', final) self.nextBlock(body) self.setups.push((TRY_FINALLY, body)) self.visit(node.body) self.emit('POP_BLOCK') self.setups.pop() self.emit('LOAD_CONST', None) self.nextBlock(final) self.setups.push((END_FINALLY, final)) self.visit(node.final) self.emit('END_FINALLY') self.setups.pop() __with_count = 
0 def visitWith(self, node): body = self.newBlock() final = self.newBlock() exitvar = "$exit%d" % self.__with_count valuevar = "$value%d" % self.__with_count self.__with_count += 1 self.set_lineno(node) self.visit(node.expr) self.emit('DUP_TOP') self.emit('LOAD_ATTR', '__exit__') self._implicitNameOp('STORE', exitvar) self.emit('LOAD_ATTR', '__enter__') self.emit('CALL_FUNCTION', 0) if node.vars is None: self.emit('POP_TOP') else: self._implicitNameOp('STORE', valuevar) self.emit('SETUP_FINALLY', final) self.nextBlock(body) self.setups.push((TRY_FINALLY, body)) if node.vars is not None: self._implicitNameOp('LOAD', valuevar) self._implicitNameOp('DELETE', valuevar) self.visit(node.vars) self.visit(node.body) self.emit('POP_BLOCK') self.setups.pop() self.emit('LOAD_CONST', None) self.nextBlock(final) self.setups.push((END_FINALLY, final)) self._implicitNameOp('LOAD', exitvar) self._implicitNameOp('DELETE', exitvar) self.emit('WITH_CLEANUP') self.emit('END_FINALLY') self.setups.pop() self.__with_count -= 1 # misc def visitDiscard(self, node): self.set_lineno(node) self.visit(node.expr) self.emit('POP_TOP') def visitConst(self, node): self.emit('LOAD_CONST', node.value) def visitKeyword(self, node): self.emit('LOAD_CONST', node.name) self.visit(node.expr) def visitGlobal(self, node): # no code to generate pass def visitName(self, node): self.set_lineno(node) self.loadName(node.name) def visitPass(self, node): self.set_lineno(node) def visitImport(self, node): self.set_lineno(node) level = 0 if self.graph.checkFlag(CO_FUTURE_ABSIMPORT) else -1 for name, alias in node.names: if VERSION > 1: self.emit('LOAD_CONST', level) self.emit('LOAD_CONST', None) self.emit('IMPORT_NAME', name) mod = name.split(".")[0] if alias: self._resolveDots(name) self.storeName(alias) else: self.storeName(mod) def visitFrom(self, node): self.set_lineno(node) level = node.level if level == 0 and not self.graph.checkFlag(CO_FUTURE_ABSIMPORT): level = -1 fromlist = map(lambda (name, alias): name, 
node.names) if VERSION > 1: self.emit('LOAD_CONST', level) self.emit('LOAD_CONST', tuple(fromlist)) self.emit('IMPORT_NAME', node.modname) for name, alias in node.names: if VERSION > 1: if name == '*': self.namespace = 0 self.emit('IMPORT_STAR') # There can only be one name w/ from ... import * assert len(node.names) == 1 return else: self.emit('IMPORT_FROM', name) self._resolveDots(name) self.storeName(alias or name) else: self.emit('IMPORT_FROM', name) self.emit('POP_TOP') def _resolveDots(self, name): elts = name.split(".") if len(elts) == 1: return for elt in elts[1:]: self.emit('LOAD_ATTR', elt) def visitGetattr(self, node): self.visit(node.expr) self.emit('LOAD_ATTR', self.mangle(node.attrname)) # next five implement assignments def visitAssign(self, node): self.set_lineno(node) self.visit(node.expr) dups = len(node.nodes) - 1 for i in range(len(node.nodes)): elt = node.nodes[i] if i < dups: self.emit('DUP_TOP') if isinstance(elt, ast.Node): self.visit(elt) def visitAssName(self, node): if node.flags == 'OP_ASSIGN': self.storeName(node.name) elif node.flags == 'OP_DELETE': self.set_lineno(node) self.delName(node.name) else: print "oops", node.flags def visitAssAttr(self, node): self.visit(node.expr) if node.flags == 'OP_ASSIGN': self.emit('STORE_ATTR', self.mangle(node.attrname)) elif node.flags == 'OP_DELETE': self.emit('DELETE_ATTR', self.mangle(node.attrname)) else: print "warning: unexpected flags:", node.flags print node def _visitAssSequence(self, node, op='UNPACK_SEQUENCE'): if findOp(node) != 'OP_DELETE': self.emit(op, len(node.nodes)) for child in node.nodes: self.visit(child) if VERSION > 1: visitAssTuple = _visitAssSequence visitAssList = _visitAssSequence else: def visitAssTuple(self, node): self._visitAssSequence(node, 'UNPACK_TUPLE') def visitAssList(self, node): self._visitAssSequence(node, 'UNPACK_LIST') # augmented assignment def visitAugAssign(self, node): self.set_lineno(node) aug_node = wrap_aug(node.node) self.visit(aug_node, "load") 
self.visit(node.expr) self.emit(self._augmented_opcode[node.op]) self.visit(aug_node, "store") _augmented_opcode = { '+=' : 'INPLACE_ADD', '-=' : 'INPLACE_SUBTRACT', '*=' : 'INPLACE_MULTIPLY', '/=' : 'INPLACE_DIVIDE', '//=': 'INPLACE_FLOOR_DIVIDE', '%=' : 'INPLACE_MODULO', '**=': 'INPLACE_POWER', '>>=': 'INPLACE_RSHIFT', '<<=': 'INPLACE_LSHIFT', '&=' : 'INPLACE_AND', '^=' : 'INPLACE_XOR', '|=' : 'INPLACE_OR', } def visitAugName(self, node, mode): if mode == "load": self.loadName(node.name) elif mode == "store": self.storeName(node.name) def visitAugGetattr(self, node, mode): if mode == "load": self.visit(node.expr) self.emit('DUP_TOP') self.emit('LOAD_ATTR', self.mangle(node.attrname)) elif mode == "store": self.emit('ROT_TWO') self.emit('STORE_ATTR', self.mangle(node.attrname)) def visitAugSlice(self, node, mode): if mode == "load": self.visitSlice(node, 1) elif mode == "store": slice = 0 if node.lower: slice = slice | 1 if node.upper: slice = slice | 2 if slice == 0: self.emit('ROT_TWO') elif slice == 3: self.emit('ROT_FOUR') else: self.emit('ROT_THREE') self.emit('STORE_SLICE+%d' % slice) def visitAugSubscript(self, node, mode): if mode == "load": self.visitSubscript(node, 1) elif mode == "store": self.emit('ROT_THREE') self.emit('STORE_SUBSCR') def visitExec(self, node): self.visit(node.expr) if node.locals is None: self.emit('LOAD_CONST', None) else: self.visit(node.locals) if node.globals is None: self.emit('DUP_TOP') else: self.visit(node.globals) self.emit('EXEC_STMT') def visitCallFunc(self, node): pos = 0 kw = 0 self.set_lineno(node) self.visit(node.node) for arg in node.args: self.visit(arg) if isinstance(arg, ast.Keyword): kw = kw + 1 else: pos = pos + 1 if node.star_args is not None: self.visit(node.star_args) if node.dstar_args is not None: self.visit(node.dstar_args) have_star = node.star_args is not None have_dstar = node.dstar_args is not None opcode = callfunc_opcode_info[have_star, have_dstar] self.emit(opcode, kw << 8 | pos) def visitPrint(self, 
node, newline=0): self.set_lineno(node) if node.dest: self.visit(node.dest) for child in node.nodes: if node.dest: self.emit('DUP_TOP') self.visit(child) if node.dest: self.emit('ROT_TWO') self.emit('PRINT_ITEM_TO') else: self.emit('PRINT_ITEM') if node.dest and not newline: self.emit('POP_TOP') def visitPrintnl(self, node): self.visitPrint(node, newline=1) if node.dest: self.emit('PRINT_NEWLINE_TO') else: self.emit('PRINT_NEWLINE') def visitReturn(self, node): self.set_lineno(node) self.visit(node.value) self.emit('RETURN_VALUE') def visitYield(self, node): self.set_lineno(node) self.visit(node.value) self.emit('YIELD_VALUE') # slice and subscript stuff def visitSlice(self, node, aug_flag=None): # aug_flag is used by visitAugSlice self.visit(node.expr) slice = 0 if node.lower: self.visit(node.lower) slice = slice | 1 if node.upper: self.visit(node.upper) slice = slice | 2 if aug_flag: if slice == 0: self.emit('DUP_TOP') elif slice == 3: self.emit('DUP_TOPX', 3) else: self.emit('DUP_TOPX', 2) if node.flags == 'OP_APPLY': self.emit('SLICE+%d' % slice) elif node.flags == 'OP_ASSIGN': self.emit('STORE_SLICE+%d' % slice) elif node.flags == 'OP_DELETE': self.emit('DELETE_SLICE+%d' % slice) else: print "weird slice", node.flags raise def visitSubscript(self, node, aug_flag=None): self.visit(node.expr) for sub in node.subs: self.visit(sub) if len(node.subs) > 1: self.emit('BUILD_TUPLE', len(node.subs)) if aug_flag: self.emit('DUP_TOPX', 2) if node.flags == 'OP_APPLY': self.emit('BINARY_SUBSCR') elif node.flags == 'OP_ASSIGN': self.emit('STORE_SUBSCR') elif node.flags == 'OP_DELETE': self.emit('DELETE_SUBSCR') # binary ops def binaryOp(self, node, op): self.visit(node.left) self.visit(node.right) self.emit(op) def visitAdd(self, node): return self.binaryOp(node, 'BINARY_ADD') def visitSub(self, node): return self.binaryOp(node, 'BINARY_SUBTRACT') def visitMul(self, node): return self.binaryOp(node, 'BINARY_MULTIPLY') def visitDiv(self, node): return self.binaryOp(node, 
self._div_op) def visitFloorDiv(self, node): return self.binaryOp(node, 'BINARY_FLOOR_DIVIDE') def visitMod(self, node): return self.binaryOp(node, 'BINARY_MODULO') def visitPower(self, node): return self.binaryOp(node, 'BINARY_POWER') def visitLeftShift(self, node): return self.binaryOp(node, 'BINARY_LSHIFT') def visitRightShift(self, node): return self.binaryOp(node, 'BINARY_RSHIFT') # unary ops def unaryOp(self, node, op): self.visit(node.expr) self.emit(op) def visitInvert(self, node): return self.unaryOp(node, 'UNARY_INVERT') def visitUnarySub(self, node): return self.unaryOp(node, 'UNARY_NEGATIVE') def visitUnaryAdd(self, node): return self.unaryOp(node, 'UNARY_POSITIVE') def visitUnaryInvert(self, node): return self.unaryOp(node, 'UNARY_INVERT') def visitNot(self, node): return self.unaryOp(node, 'UNARY_NOT') def visitBackquote(self, node): return self.unaryOp(node, 'UNARY_CONVERT') # bit ops def bitOp(self, nodes, op): self.visit(nodes[0]) for node in nodes[1:]: self.visit(node) self.emit(op) def visitBitand(self, node): return self.bitOp(node.nodes, 'BINARY_AND') def visitBitor(self, node): return self.bitOp(node.nodes, 'BINARY_OR') def visitBitxor(self, node): return self.bitOp(node.nodes, 'BINARY_XOR') # object constructors def visitEllipsis(self, node): self.emit('LOAD_CONST', Ellipsis) def visitTuple(self, node): self.set_lineno(node) for elt in node.nodes: self.visit(elt) self.emit('BUILD_TUPLE', len(node.nodes)) def visitList(self, node): self.set_lineno(node) for elt in node.nodes: self.visit(elt) self.emit('BUILD_LIST', len(node.nodes)) def visitSliceobj(self, node): for child in node.nodes: self.visit(child) self.emit('BUILD_SLICE', len(node.nodes)) def visitDict(self, node): self.set_lineno(node) self.emit('BUILD_MAP', 0) for k, v in node.items: self.emit('DUP_TOP') self.visit(k) self.visit(v) self.emit('ROT_THREE') self.emit('STORE_SUBSCR') class NestedScopeMixin: """Defines initClass() for nested scoping (Python 2.2-compatible)""" def 
initClass(self): self.__class__.NameFinder = LocalNameFinder self.__class__.FunctionGen = FunctionCodeGenerator self.__class__.ClassGen = ClassCodeGenerator class ModuleCodeGenerator(NestedScopeMixin, CodeGenerator): __super_init = CodeGenerator.__init__ scopes = None def __init__(self, tree): self.graph = pyassem.PyFlowGraph("<module>", tree.filename) self.futures = future.find_futures(tree) self.__super_init() walk(tree, self) def get_module(self): return self class ExpressionCodeGenerator(NestedScopeMixin, CodeGenerator): __super_init = CodeGenerator.__init__ scopes = None futures = () def __init__(self, tree): self.graph = pyassem.PyFlowGraph("<expression>", tree.filename) self.__super_init() walk(tree, self) def get_module(self): return self class InteractiveCodeGenerator(NestedScopeMixin, CodeGenerator): __super_init = CodeGenerator.__init__ scopes = None futures = () def __init__(self, tree): self.graph = pyassem.PyFlowGraph("<interactive>", tree.filename) self.__super_init() self.set_lineno(tree) walk(tree, self) self.emit('RETURN_VALUE') def get_module(self): return self def visitDiscard(self, node): # XXX Discard means it's an expression. Perhaps this is a bad # name. 
self.visit(node.expr) self.emit('PRINT_EXPR') class AbstractFunctionCode: optimized = 1 lambdaCount = 0 def __init__(self, func, scopes, isLambda, class_name, mod): self.class_name = class_name self.module = mod if isLambda: klass = FunctionCodeGenerator name = "<lambda.%d>" % klass.lambdaCount klass.lambdaCount = klass.lambdaCount + 1 else: name = func.name args, hasTupleArg = generateArgList(func.argnames) self.graph = pyassem.PyFlowGraph(name, func.filename, args, optimized=1) self.isLambda = isLambda self.super_init() if not isLambda and func.doc: self.setDocstring(func.doc) lnf = walk(func.code, self.NameFinder(args), verbose=0) self.locals.push(lnf.getLocals()) if func.varargs: self.graph.setFlag(CO_VARARGS) if func.kwargs: self.graph.setFlag(CO_VARKEYWORDS) self.set_lineno(func) if hasTupleArg: self.generateArgUnpack(func.argnames) def get_module(self): return self.module def finish(self): self.graph.startExitBlock() if not self.isLambda: self.emit('LOAD_CONST', None) self.emit('RETURN_VALUE') def generateArgUnpack(self, args): for i in range(len(args)): arg = args[i] if isinstance(arg, tuple): self.emit('LOAD_FAST', '.%d' % (i * 2)) self.unpackSequence(arg) def unpackSequence(self, tup): if VERSION > 1: self.emit('UNPACK_SEQUENCE', len(tup)) else: self.emit('UNPACK_TUPLE', len(tup)) for elt in tup: if isinstance(elt, tuple): self.unpackSequence(elt) else: self._nameOp('STORE', elt) unpackTuple = unpackSequence class FunctionCodeGenerator(NestedScopeMixin, AbstractFunctionCode, CodeGenerator): super_init = CodeGenerator.__init__ # call be other init scopes = None __super_init = AbstractFunctionCode.__init__ def __init__(self, func, scopes, isLambda, class_name, mod): self.scopes = scopes self.scope = scopes[func] self.__super_init(func, scopes, isLambda, class_name, mod) self.graph.setFreeVars(self.scope.get_free_vars()) self.graph.setCellVars(self.scope.get_cell_vars()) if self.scope.generator is not None: self.graph.setFlag(CO_GENERATOR) class 
GenExprCodeGenerator(NestedScopeMixin, AbstractFunctionCode, CodeGenerator): super_init = CodeGenerator.__init__ # call be other init scopes = None __super_init = AbstractFunctionCode.__init__ def __init__(self, gexp, scopes, class_name, mod): self.scopes = scopes self.scope = scopes[gexp] self.__super_init(gexp, scopes, 1, class_name, mod) self.graph.setFreeVars(self.scope.get_free_vars()) self.graph.setCellVars(self.scope.get_cell_vars()) self.graph.setFlag(CO_GENERATOR) class AbstractClassCode: def __init__(self, klass, scopes, module): self.class_name = klass.name self.module = module self.graph = pyassem.PyFlowGraph(klass.name, klass.filename, optimized=0, klass=1) self.super_init() lnf = walk(klass.code, self.NameFinder(), verbose=0) self.locals.push(lnf.getLocals()) self.graph.setFlag(CO_NEWLOCALS) if klass.doc: self.setDocstring(klass.doc) def get_module(self): return self.module def finish(self): self.graph.startExitBlock() self.emit('LOAD_LOCALS') self.emit('RETURN_VALUE') class ClassCodeGenerator(NestedScopeMixin, AbstractClassCode, CodeGenerator): super_init = CodeGenerator.__init__ scopes = None __super_init = AbstractClassCode.__init__ def __init__(self, klass, scopes, module): self.scopes = scopes self.scope = scopes[klass] self.__super_init(klass, scopes, module) self.graph.setFreeVars(self.scope.get_free_vars()) self.graph.setCellVars(self.scope.get_cell_vars()) self.set_lineno(klass) self.emit("LOAD_GLOBAL", "__name__") self.storeName("__module__") if klass.doc: self.emit("LOAD_CONST", klass.doc) self.storeName('__doc__') def generateArgList(arglist): """Generate an arg list marking TupleArgs""" args = [] extra = [] count = 0 for i in range(len(arglist)): elt = arglist[i] if isinstance(elt, str): args.append(elt) elif isinstance(elt, tuple): args.append(TupleArg(i * 2, elt)) extra.extend(misc.flatten(elt)) count = count + 1 else: raise ValueError, "unexpect argument type:", elt return args + extra, count def findOp(node): """Find the op (DELETE, 
LOAD, STORE) in an AssTuple tree""" v = OpFinder() walk(node, v, verbose=0) return v.op class OpFinder: def __init__(self): self.op = None def visitAssName(self, node): if self.op is None: self.op = node.flags elif self.op != node.flags: raise ValueError, "mixed ops in stmt" visitAssAttr = visitAssName visitSubscript = visitAssName class Delegator: """Base class to support delegation for augmented assignment nodes To generator code for augmented assignments, we use the following wrapper classes. In visitAugAssign, the left-hand expression node is visited twice. The first time the visit uses the normal method for that node . The second time the visit uses a different method that generates the appropriate code to perform the assignment. These delegator classes wrap the original AST nodes in order to support the variant visit methods. """ def __init__(self, obj): self.obj = obj def __getattr__(self, attr): return getattr(self.obj, attr) class AugGetattr(Delegator): pass class AugName(Delegator): pass class AugSlice(Delegator): pass class AugSubscript(Delegator): pass wrapper = { ast.Getattr: AugGetattr, ast.Name: AugName, ast.Slice: AugSlice, ast.Subscript: AugSubscript, } def wrap_aug(node): return wrapper[node.__class__](node) if __name__ == "__main__": for file in sys.argv[1:]: compileFile(file)
adaussy/eclipse-monkey-revival
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/compiler/pycodegen.py
Python
epl-1.0
47,712
[ "VisIt" ]
3f3189105c565e550fc435ed6257a0a3c453d2c3b7bfbbca4b516dbab8a945de
#!/usr/bin/python """ Copyright 2010 Paul Willworth <ioscode@gmail.com> This file is part of Galaxy Harvester. Galaxy Harvester is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Galaxy Harvester is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>. """ import os import cgi import Cookie import MySQLdb import dbSession import dbShared # Get current url try: url = os.environ['SCRIPT_NAME'] except KeyError: url = '' form = cgi.FieldStorage() # Get Cookies errorstr = '' cookies = Cookie.SimpleCookie() try: cookies.load(os.environ['HTTP_COOKIE']) except KeyError: errorstr = 'no cookies\n' if errorstr == '': try: currentUser = cookies['userID'].value except KeyError: currentUser = '' try: loginResult = cookies['loginAttempt'].value except KeyError: loginResult = 'success' try: sid = cookies['gh_sid'].value except KeyError: sid = form.getfirst('gh_sid', '') else: currentUser = '' loginResult = 'success' sid = form.getfirst('gh_sid', '') email = form.getfirst("email") # escape input to prevent sql injection sid = dbShared.dbInsertSafe(sid) email = dbShared.dbInsertSafe(email) # Get a session logged_state = 0 linkappend = '' sess = dbSession.getSession(sid, 2592000) if (sess != ''): logged_state = 1 currentUser = sess linkappend = 'gh_sid=' + sid # Check for errors errstr='' if (len(email) < 6): errstr = errstr + "That is not a valid email address. \r\n" if (logged_state == 0): errstr = errstr + "You must be logged in to update your email address. 
\r\n" if (errstr != ''): result = "Your E-mail Address could not be updated because of the following errors:\r\n" + errstr else: conn = dbShared.ghConn() cursor = conn.cursor() cursor.execute("UPDATE tUsers SET emailAddress='" + email + "' WHERE userID='" + currentUser + "';") cursor.close() conn.close() result = "E-Mail Address Updated" print "Content-Type: text/html\n" print result
clreinki/GalaxyHarvester
udEmail.py
Python
agpl-3.0
2,533
[ "Galaxy" ]
da61fd5a08de6af42fd858f8f17464760f9aaf1eca373d2113ed94b031ec10a7
# -*- coding: utf8 """Random Projection transformers Random Projections are a simple and computationally efficient way to reduce the dimensionality of the data by trading a controlled amount of accuracy (as additional variance) for faster processing times and smaller model sizes. The dimensions and distribution of Random Projections matrices are controlled so as to preserve the pairwise distances between any two samples of the dataset. The main theoretical result behind the efficiency of random projection is the `Johnson-Lindenstrauss lemma (quoting Wikipedia) <https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_: In mathematics, the Johnson-Lindenstrauss lemma is a result concerning low-distortion embeddings of points from high-dimensional into low-dimensional Euclidean space. The lemma states that a small set of points in a high-dimensional space can be embedded into a space of much lower dimension in such a way that distances between the points are nearly preserved. The map used for the embedding is at least Lipschitz, and can even be taken to be an orthogonal projection. 
""" # Authors: Olivier Grisel <olivier.grisel@ensta.org>, # Arnaud Joly <a.joly@ulg.ac.be> # License: BSD 3 clause import warnings from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import BaseEstimator, TransformerMixin from .utils import check_random_state from .utils.extmath import safe_sparse_dot from .utils.random import sample_without_replacement from .utils.validation import check_array, check_is_fitted from .utils.validation import _deprecate_positional_args from .exceptions import DataDimensionalityWarning __all__ = ["SparseRandomProjection", "GaussianRandomProjection", "johnson_lindenstrauss_min_dim"] @_deprecate_positional_args def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1): """Find a 'safe' number of components to randomly project to The distortion introduced by a random projection `p` only changes the distance between two points by a factor (1 +- eps) in an euclidean space with good probability. The projection `p` is an eps-embedding as defined by: (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantee the eps-embedding is given by: n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) Note that the number of dimensions is independent of the original number of features but instead depends on the size of the dataset: the larger the dataset, the higher is the minimal dimensionality of an eps-embedding. Read more in the :ref:`User Guide <johnson_lindenstrauss>`. Parameters ---------- n_samples : int or numpy array of int greater than 0, Number of samples. If an array is given, it will compute a safe number of components array-wise. 
eps : float or ndarray of shape (n_components,), dtype=float, \ default=0.1 Maximum distortion rate in the range (0,1 ) as defined by the Johnson-Lindenstrauss lemma. If an array is given, it will compute a safe number of components array-wise. Returns ------- n_components : int or numpy array of int, The minimal number of components to guarantee with good probability an eps-embedding with n_samples. Examples -------- >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5) 663 >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01]) array([ 663, 11841, 1112658]) >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1) array([ 7894, 9868, 11841]) References ---------- .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma .. [2] Sanjoy Dasgupta and Anupam Gupta, 1999, "An elementary proof of the Johnson-Lindenstrauss Lemma." http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654 """ eps = np.asarray(eps) n_samples = np.asarray(n_samples) if np.any(eps <= 0.0) or np.any(eps >= 1): raise ValueError( "The JL bound is defined for eps in ]0, 1[, got %r" % eps) if np.any(n_samples) <= 0: raise ValueError( "The JL bound is defined for n_samples greater than zero, got %r" % n_samples) denominator = (eps ** 2 / 2) - (eps ** 3 / 3) return (4 * np.log(n_samples) / denominator).astype(int) def _check_density(density, n_features): """Factorize density check according to Li et al.""" if density == 'auto': density = 1 / np.sqrt(n_features) elif density <= 0 or density > 1: raise ValueError("Expected density in range ]0, 1], got: %r" % density) return density def _check_input_size(n_components, n_features): """Factorize argument checking for random matrix generation""" if n_components <= 0: raise ValueError("n_components must be strictly positive, got %d" % n_components) if n_features <= 0: raise ValueError("n_features must be strictly positive, got %d" % n_features) def _gaussian_random_matrix(n_components, n_features, random_state=None): """Generate a 
dense Gaussian random matrix. The components of the random matrix are drawn from N(0, 1.0 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. random_state : int or RandomState instance, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- components : numpy array of shape [n_components, n_features] The generated Gaussian random matrix. See Also -------- GaussianRandomProjection """ _check_input_size(n_components, n_features) rng = check_random_state(random_state) components = rng.normal(loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features)) return components def _sparse_random_matrix(n_components, n_features, density='auto', random_state=None): """Generalized Achlioptas random sparse matrix for random projection Setting density to 1 / 3 will yield the original matrix by Dimitris Achlioptas while setting a lower value will yield the generalization by Ping Li et al. If we note :math:`s = 1 / density`, the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. density : float or 'auto', default='auto' Ratio of non-zero component in the random projection matrix in the range `(0, 1]` If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). 
Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. random_state : int or RandomState instance, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- components : array or CSR matrix with shape [n_components, n_features] The generated Gaussian random matrix. See Also -------- SparseRandomProjection References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", http://www.cs.ucsc.edu/~optas/papers/jl.pdf """ _check_input_size(n_components, n_features) density = _check_density(density, n_features) rng = check_random_state(random_state) if density == 1: # skip index generation if totally dense components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1 return 1 / np.sqrt(n_components) * components else: # Generate location of non zero elements indices = [] offset = 0 indptr = [offset] for _ in range(n_components): # find the indices of the non-zero components for row i n_nonzero_i = rng.binomial(n_features, density) indices_i = sample_without_replacement(n_features, n_nonzero_i, random_state=rng) indices.append(indices_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) # Among non zero components the probability of the sign is 50%/50% data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1 # build the CSR structure by concatenating the rows components = sp.csr_matrix((data, indices, indptr), shape=(n_components, n_features)) return np.sqrt(1 / density) / np.sqrt(n_components) * components class BaseRandomProjection(TransformerMixin, BaseEstimator, metaclass=ABCMeta): """Base class for random projections. Warning: This class should not be used directly. 
Use derived classes instead. """ @abstractmethod def __init__(self, n_components='auto', *, eps=0.1, dense_output=False, random_state=None): self.n_components = n_components self.eps = eps self.dense_output = dense_output self.random_state = random_state @abstractmethod def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : numpy array or CSR matrix [n_components, n_features] The generated random matrix. """ def fit(self, X, y=None): """Generate a sparse random projection matrix Parameters ---------- X : numpy array or scipy.sparse of shape [n_samples, n_features] Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers. y Ignored Returns ------- self """ X = self._validate_data(X, accept_sparse=['csr', 'csc']) n_samples, n_features = X.shape if self.n_components == 'auto': self.n_components_ = johnson_lindenstrauss_min_dim( n_samples=n_samples, eps=self.eps) if self.n_components_ <= 0: raise ValueError( 'eps=%f and n_samples=%d lead to a target dimension of ' '%d which is invalid' % ( self.eps, n_samples, self.n_components_)) elif self.n_components_ > n_features: raise ValueError( 'eps=%f and n_samples=%d lead to a target dimension of ' '%d which is larger than the original space with ' 'n_features=%d' % (self.eps, n_samples, self.n_components_, n_features)) else: if self.n_components <= 0: raise ValueError("n_components must be greater than 0, got %s" % self.n_components) elif self.n_components > n_features: warnings.warn( "The number of components is higher than the number of" " features: n_features < n_components (%s < %s)." "The dimensionality of the problem will not be reduced." 
% (n_features, self.n_components), DataDimensionalityWarning) self.n_components_ = self.n_components # Generate a projection matrix of size [n_components, n_features] self.components_ = self._make_random_matrix(self.n_components_, n_features) # Check contract assert self.components_.shape == (self.n_components_, n_features), ( 'An error has occurred the self.components_ matrix has ' ' not the proper shape.') return self def transform(self, X): """Project the data by using matrix product with the random matrix Parameters ---------- X : numpy array or scipy.sparse of shape [n_samples, n_features] The input data to project into a smaller dimensional space. Returns ------- X_new : numpy array or scipy sparse of shape [n_samples, n_components] Projected array. """ X = check_array(X, accept_sparse=['csr', 'csc']) check_is_fitted(self) if X.shape[1] != self.components_.shape[1]: raise ValueError( 'Impossible to perform projection:' 'X at fit stage had a different number of features. ' '(%s != %s)' % (X.shape[1], self.components_.shape[1])) X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output) return X_new class GaussianRandomProjection(BaseRandomProjection): """Reduce dimensionality through Gaussian random projection The components of the random matrix are drawn from N(0, 1 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. 
eps : float, default=0.1 Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when `n_components` is set to 'auto'. The value should be strictly positive. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. random_state : int or RandomState instance, default=None Controls the pseudo random number generator used to generate the projection matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- n_components_ : int Concrete number of components computed when n_components="auto". components_ : numpy array of shape [n_components, n_features] Random matrix used for the projection. Examples -------- >>> import numpy as np >>> from sklearn.random_projection import GaussianRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(100, 10000) >>> transformer = GaussianRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (100, 3947) See Also -------- SparseRandomProjection """ @_deprecate_positional_args def __init__(self, n_components='auto', *, eps=0.1, random_state=None): super().__init__( n_components=n_components, eps=eps, dense_output=True, random_state=random_state) def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : numpy array or CSR matrix [n_components, n_features] The generated random matrix. 
""" random_state = check_random_state(self.random_state) return _gaussian_random_matrix(n_components, n_features, random_state=random_state) class SparseRandomProjection(BaseRandomProjection): """Reduce dimensionality through sparse random projection Sparse random matrix is an alternative to dense random projection matrix that guarantees similar embedding quality while being much more memory efficient and allowing faster computation of the projected data. If we note `s = 1 / density` the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. density : float or 'auto', default='auto' Ratio in the range (0, 1] of non-zero component in the random projection matrix. If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. eps : float, default=0.1 Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'. This value should be strictly positive. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. 
dense_output : bool, default=False If True, ensure that the output of the random projection is a dense numpy array even if the input and random projection matrix are both sparse. In practice, if the number of components is small the number of zero components in the projected data will be very small and it will be more CPU and memory efficient to use a dense representation. If False, the projected data uses a sparse representation if the input is sparse. random_state : int or RandomState instance, default=None Controls the pseudo random number generator used to generate the projection matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- n_components_ : int Concrete number of components computed when n_components="auto". components_ : CSR matrix with shape [n_components, n_features] Random matrix used for the projection. density_ : float in range 0.0 - 1.0 Concrete density computed from when density = "auto". Examples -------- >>> import numpy as np >>> from sklearn.random_projection import SparseRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(100, 10000) >>> transformer = SparseRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (100, 3947) >>> # very few components are non-zero >>> np.mean(transformer.components_ != 0) 0.0100... See Also -------- GaussianRandomProjection References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. 
Achlioptas, 2001, "Database-friendly random projections", https://users.soe.ucsc.edu/~optas/papers/jl.pdf """ @_deprecate_positional_args def __init__(self, n_components='auto', *, density='auto', eps=0.1, dense_output=False, random_state=None): super().__init__( n_components=n_components, eps=eps, dense_output=dense_output, random_state=random_state) self.density = density def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : numpy array or CSR matrix [n_components, n_features] The generated random matrix. """ random_state = check_random_state(self.random_state) self.density_ = _check_density(self.density, n_features) return _sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)
bnaul/scikit-learn
sklearn/random_projection.py
Python
bsd-3-clause
22,989
[ "Gaussian" ]
0dae8e4f8aacf5190cd9a541a95fb89fc7276f34884535f457ef7dd67ed504aa
#!/usr/bin/env python """Copyright 2010 Phidgets Inc. This work is licensed under the Creative Commons Attribution 2.5 Canada License. To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/ """ __author__ = 'Adam Stelmack' __version__ = '2.1.8' __date__ = 'May 17 2010' #Basic imports from ctypes import * import sys from time import sleep #Phidget specific imports from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, InputChangeEventArgs, CurrentChangeEventArgs, StepperPositionChangeEventArgs, VelocityChangeEventArgs from Phidgets.Devices.Stepper import Stepper from Phidgets.Phidget import PhidgetLogLevel #Create a stepper object try: stepper = Stepper() except RuntimeError as e: print("Runtime Exception: %s" % e.details) print("Exiting....") exit(1) #Information Display Function def DisplayDeviceInfo(): print("|------------|----------------------------------|--------------|------------|") print("|- Attached -|- Type -|- Serial No. -|- Version -|") print("|------------|----------------------------------|--------------|------------|") print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (stepper.isAttached(), stepper.getDeviceName(), stepper.getSerialNum(), stepper.getDeviceVersion())) print("|------------|----------------------------------|--------------|------------|") print("Number of Motors: %i" % (stepper.getMotorCount())) #Event Handler Callback Functions def StepperAttached(e): attached = e.device print("Stepper %i Attached!" % (attached.getSerialNum())) def StepperDetached(e): detached = e.device print("Stepper %i Detached!" 
% (detached.getSerialNum())) def StepperError(e): try: source = e.device print("Stepper %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description)) except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) def StepperCurrentChanged(e): source = e.device print("Stepper %i: Motor %i -- Current Draw: %6f" % (source.getSerialNum(), e.index, e.current)) def StepperInputChanged(e): source = e.device print("Stepper %i: Input %i -- State: %s" % (source.getSerialNum(), e.index, e.state)) def StepperPositionChanged(e): source = e.device print("Stepper %i: Motor %i -- Position: %f" % (source.getSerialNum(), e.index, e.position)) def StepperVelocityChanged(e): source = e.device print("Stepper %i: Motor %i -- Velocity: %f" % (source.getSerialNum(), e.index, e.velocity)) #Main Program Code try: #logging example, uncomment to generate a log file #stepper.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log") stepper.setOnAttachHandler(StepperAttached) stepper.setOnDetachHandler(StepperDetached) stepper.setOnErrorhandler(StepperError) stepper.setOnCurrentChangeHandler(StepperCurrentChanged) stepper.setOnInputChangeHandler(StepperInputChanged) stepper.setOnPositionChangeHandler(StepperPositionChanged) stepper.setOnVelocityChangeHandler(StepperVelocityChanged) except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) print("Exiting....") exit(1) print("Opening phidget object....") try: stepper.openPhidget() except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) print("Exiting....") exit(1) print("Waiting for attach....") try: stepper.waitForAttach(10000) except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) try: stepper.closePhidget() except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) print("Exiting....") exit(1) print("Exiting....") exit(1) else: DisplayDeviceInfo() try: print("Set the current 
position as start position...") stepper.setCurrentPosition(0, 0) sleep(1) print("Set the motor as engaged...") stepper.setEngaged(0, True) sleep(1) print("The motor will run until it reaches the set goal position...") stepper.setAcceleration(0, 87543) stepper.setVelocityLimit(0, 6200) stepper.setCurrentLimit(0, 0.26) sleep(2) print("Will now move to position 20000...") stepper.setTargetPosition(0, 20000) while stepper.getCurrentPosition(0) != 20000: pass sleep(2) print("Will now move back to positon 0...") stepper.setTargetPosition(0, 0) while stepper.getCurrentPosition(0) != 0: pass except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) print("Exiting....") exit(1) print("Press Enter to quit....") chr = sys.stdin.read(1) print("Closing...") try: stepper.setEngaged(0, False) sleep(1) stepper.closePhidget() except PhidgetException as e: print("Phidget Exception %i: %s" % (e.code, e.details)) print("Exiting....") exit(1) print("Done.") exit(0)
dborrero/Reed-Taylor-Couette
PhidgetPythonSamples/Stepper-simple.py
Python
gpl-2.0
5,280
[ "VisIt" ]
a225f64aceb1b3696b6a139e9d7fd891533d15a1389e6568a716096fffe51d0b
#!/usr/bin/env python import pysam import numpy import os,sys,re from optparse import * from logging import error COMPLEMENT = { 'a' : 't', 't' : 'a', 'c' : 'g', 'g' : 'c', 'k' : 'm', 'm' : 'k', 'r' : 'y', 'y' : 'r', 's' : 's', 'w' : 'w', 'b' : 'v', 'v' : 'b', 'h' : 'd', 'd' : 'h', 'n' : 'n', 'A' : 'T', 'T' : 'A', 'C' : 'G', 'G' : 'C', 'K' : 'M', 'M' : 'K', 'R' : 'Y', 'Y' : 'R', 'S' : 'S', 'W' : 'W', 'B' : 'V', 'V' : 'B', 'H' : 'D', 'D' : 'H', 'N' : 'N', } def complement(s): return "".join([COMPLEMENT[x] for x in s]) def rev_comp(seq): return complement(seq)[::-1] def fasta_chunks(lines,strip=True,fuse=True): chunk = "" data = [] for l in lines: if l.startswith("#"): continue if l.startswith(">"): if data and chunk: #print chunk yield chunk,"".join(data) if strip: data = [] else: data = [l] chunk = l[1:].strip() else: if fuse: data.append(l.rstrip()) else: data.append(l) if data and chunk: yield chunk,"".join(data) def qfa_chunks(lines): """ Iterates over FASTQ text lines from a file like object and yields NamedTuple instances of the FASTQ with attributes qfa.name, qfa.seq, qfa.qual """ from collections import namedtuple QFA = namedtuple("qfa_tuple","name,seq,qual") I = lines.__iter__() try: while I: name = I.next().rstrip()[1:] seq = I.next().rstrip() plus = I.next().rstrip()[1:] qual = I.next().rstrip() yield QFA(name,seq,qual) except StopIteration: pass usage = """ %prog <alignments.bam> > unmapped_anchors.qfa Extract anchor sequences from unmapped reads. Optionally permute. """ parser = OptionParser(usage=usage) parser.add_option("-a","--anchor",dest="asize",type=int,default=20,help="anchor size") parser.add_option("-q","--minqual",dest="minqual",type=int,default=5,help="min avg. 
qual along both anchors (default=5)") parser.add_option("-r","--rev",dest="rev",type="choice",choices=["A","B","R","N","C","P"],default="N",help="permute read parts or reverse A,B,R,C,N for control") parser.add_option("-R","--reads",dest="reads",action="store_true",default=False,help="instead of unmapped reads from BAM, input is sites.reads from find_circ.py") parser.add_option("-F","--fasta",dest="fasta",action="store_true",default=False,help="instead of unmapped reads from BAM, input is FASTA file") parser.add_option("-Q","--fastq",dest="fastq",action="store_true",default=False,help="instead of unmapped reads from BAM, input is FASTQ file") options,args = parser.parse_args() import random perm_A = [] perm_I = [] perm_B = [] perm_burn_in = [] N_perm = 100 def randomchoice(l): return l.pop(random.randint(0,len(l)-1)) def passthru(x): return x def reverse(x): return x[::-1] funcs = { 'A' : (passthru,reverse,passthru), 'B' : (passthru,passthru,reverse), 'R' : (reverse,passthru,passthru), 'N' : (passthru,passthru,passthru), 'P' : (passthru,passthru,passthru), 'C' : (passthru,passthru,passthru), } read_f,A_f,B_f = funcs[options.rev] def handle_read(read): if not read.is_unmapped: return seq,qual = read_f(read.seq),read_f(read.qual) # minimal quality scores nq = numpy.fromstring(qual,dtype=numpy.uint8) - 35 if nq[:options.asize].mean() < options.minqual or nq[-options.asize:].mean() < options.minqual: # read is junk #print "qual.fail",nq[:options.asize].mean(),nq[-options.asize:].mean() return if options.rev == "P": perm_A.append((seq[:options.asize],qual[:options.asize])) perm_B.append((seq[-options.asize:],qual[-options.asize:])) perm_I.append((seq[options.asize:-options.asize:],qual[options.asize:-options.asize:])) if len(perm_burn_in) < N_perm: # collect some reads for permutation control first. 
perm_burn_in.append(read) return A_seq,A_qual = randomchoice(perm_A) B_seq,B_qual = randomchoice(perm_B) I_seq,I_qual = randomchoice(perm_I) seq,qual = A_seq+I_seq+B_seq,A_qual+I_qual+B_qual if options.rev == "C": seq = rev_comp(seq) qual = reverse(qual) print "@%s_A__%s" % (read.qname,seq) print A_f(seq[:options.asize]) print "+" print A_f(qual[:options.asize]) print "@%s_B" % read.qname print B_f(seq[-options.asize:]) print "+" print B_f(qual[-options.asize:]) if options.reads: N = 0 for line in file(args[0]): name,seq = line.rstrip().split('\t').replace(" ","_") N +=1 class Item(object): pass read = Item() read.qname = "%s_%d" % (name,N) read.is_unmapped=True read.seq = seq read.qual = "b"*len(seq) handle_read(read) elif options.fasta: N = 0 for name,seq in fasta_chunks(file(args[0])): N += 1 name = name.replace(" ","_") class Item(object): pass read = Item() read.qname = "%s_%d" % (name,N) read.is_unmapped=True read.seq = seq read.qual = "b"*len(seq) handle_read(read) elif options.fastq: N = 0 for fq in qfa_chunks(file(args[0])): N += 1 name = fq.name.replace(" ","_") class Item(object): pass read = Item() read.qname = "%s_%d" % (name,N) read.is_unmapped=True read.seq = fq.seq read.qual = fq.qual handle_read(read) else: for read in pysam.Samfile(args[0],'rb'): handle_read(read) for read in perm_burn_in: handle_read(read)
marvin-jens/find_circ
unmapped2anchors.py
Python
gpl-3.0
6,014
[ "pysam" ]
3eacbba71253487c1a7400b97720b281cac83a7008c5090d6e827f65990c600f
# Generated by Django 3.1.8 on 2021-04-19 10:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("foirequest", "0049_auto_20210203_1237"), ] operations = [ migrations.AddField( model_name="foimessage", name="content_rendered_anon", field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name="foimessage", name="content_rendered_auth", field=models.TextField(blank=True, null=True), ), migrations.AlterField( model_name="foimessage", name="kind", field=models.CharField( choices=[ ("email", "email"), ("post", "postal mail"), ("fax", "fax"), ("upload", "upload"), ("phone", "phone call"), ("visit", "visit in person"), ], default="email", max_length=10, ), ), ]
fin/froide
froide/foirequest/migrations/0050_auto_20210419_1239.py
Python
mit
1,114
[ "VisIt" ]
c2dc808a59a3e8384420b854674a1a80eb2ecb2ac70c3ac26e37c5ae84fa6c86
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Autocompletion config for YouCompleteMe in Chromium. # # USAGE: # # 1. Install YCM [https://github.com/Valloric/YouCompleteMe] # (Googlers should check out [go/ycm]) # # 2. Create a symbolic link to this file called .ycm_extra_conf.py in the # directory above your Chromium checkout (i.e. next to your .gclient file). # # cd src # ln -rs tools/vim/chromium.ycm_extra_conf.py ../.ycm_extra_conf.py # # 3. (optional) Whitelist the .ycm_extra_conf.py from step #2 by adding the # following to your .vimrc: # # let g:ycm_extra_conf_globlist=['<path to .ycm_extra_conf.py>'] # # You can also add other .ycm_extra_conf.py files you want to use to this # list to prevent excessive prompting each time you visit a directory # covered by a config file. # # 4. Profit # # # Usage notes: # # * You must use ninja & clang to build Chromium. # # * You must have run gyp_chromium and built Chromium recently. # # # Hacking notes: # # * The purpose of this script is to construct an accurate enough command line # for YCM to pass to clang so it can build and extract the symbols. # # * Right now, we only pull the -I and -D flags. That seems to be sufficient # for everything I've used it for. # # * That whole ninja & clang thing? We could support other configs if someone # were willing to write the correct commands and a parser. # # * This has only been tested on gPrecise. import os import os.path import re import shlex import subprocess import sys # Flags from YCM's default config. _default_flags = [ '-DUSE_CLANG_COMPLETER', '-std=c++11', '-x', 'c++', ] def PathExists(*args): return os.path.exists(os.path.join(*args)) def FindChromeSrcFromFilename(filename): """Searches for the root of the Chromium checkout. Simply checks parent directories until it finds .gclient and src/. Args: filename: (String) Path to source file being edited. 
Returns: (String) Path of 'src/', or None if unable to find. """ curdir = os.path.normpath(os.path.dirname(filename)) while not (os.path.basename(os.path.realpath(curdir)) == 'src' and PathExists(curdir, 'DEPS') and (PathExists(curdir, '..', '.gclient') or PathExists(curdir, '.git'))): nextdir = os.path.normpath(os.path.join(curdir, '..')) if nextdir == curdir: return None curdir = nextdir return curdir def GetDefaultSourceFile(chrome_root, filename): """Returns the default source file to use as an alternative to |filename|. Compile flags used to build the default source file is assumed to be a close-enough approximation for building |filename|. Args: chrome_root: (String) Absolute path to the root of Chromium checkout. filename: (String) Absolute path to the source file. Returns: (String) Absolute path to substitute source file. """ blink_root = os.path.join(chrome_root, 'third_party', 'WebKit') if filename.startswith(blink_root): return os.path.join(blink_root, 'Source', 'core', 'Init.cpp') else: if 'test.' in filename: return os.path.join(chrome_root, 'base', 'logging_unittest.cc') return os.path.join(chrome_root, 'base', 'logging.cc') def GetBuildableSourceFile(chrome_root, filename): """Returns a buildable source file corresponding to |filename|. A buildable source file is one which is likely to be passed into clang as a source file during the build. For .h files, returns the closest matching .cc, .cpp or .c file. If no such file is found, returns the same as GetDefaultSourceFile(). Args: chrome_root: (String) Absolute path to the root of Chromium checkout. filename: (String) Absolute path to the target source file. Returns: (String) Absolute path to source file. """ if filename.endswith('.h'): # Header files can't be built. Instead, try to match a header file to its # corresponding source file. 
alternates = ['.cc', '.cpp', '.c'] for alt_extension in alternates: alt_name = filename[:-2] + alt_extension if os.path.exists(alt_name): return alt_name return GetDefaultSourceFile(chrome_root, filename) return filename def GetNinjaBuildOutputsForSourceFile(out_dir, filename): """Returns a list of build outputs for filename. The list is generated by invoking 'ninja -t query' tool to retrieve a list of inputs and outputs of |filename|. This list is then filtered to only include .o and .obj outputs. Args: out_dir: (String) Absolute path to ninja build output directory. filename: (String) Absolute path to source file. Returns: (List of Strings) List of target names. Will return [] if |filename| doesn't yield any .o or .obj outputs. """ # Ninja needs the path to the source file relative to the output build # directory. rel_filename = os.path.relpath(os.path.realpath(filename), out_dir) p = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) stdout, _ = p.communicate() if p.returncode != 0: return [] # The output looks like: # ../../relative/path/to/source.cc: # outputs: # obj/reative/path/to/target.source.o # obj/some/other/target2.source.o # another/target.txt # outputs_text = stdout.partition('\n outputs:\n')[2] output_lines = [line.strip() for line in outputs_text.split('\n')] return [target for target in output_lines if target and (target.endswith('.o') or target.endswith('.obj'))] def GetClangCommandLineForNinjaOutput(out_dir, build_target): """Returns the Clang command line for building |build_target| Asks ninja for the list of commands used to build |filename| and returns the final Clang invocation. Args: out_dir: (String) Absolute path to ninja build output directory. build_target: (String) A build target understood by ninja Returns: (String or None) Clang command line or None if a Clang command line couldn't be determined. 
""" p = subprocess.Popen(['ninja', '-v', '-C', out_dir, '-t', 'commands', build_target], stdout=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if p.returncode != 0: return None # Ninja will return multiple build steps for all dependencies up to # |build_target|. The build step we want is the last Clang invocation, which # is expected to be the one that outputs |build_target|. for line in reversed(stdout.split('\n')): if 'clang' in line: return line return None def GetClangCommandLineFromNinjaForSource(out_dir, filename): """Returns a Clang command line used to build |filename|. The same source file could be built multiple times using different tool chains. In such cases, this command returns the first Clang invocation. We currently don't prefer one toolchain over another. Hopefully the tool chain corresponding to the Clang command line is compatible with the Clang build used by YCM. Args: out_dir: (String) Absolute path to Chromium checkout. filename: (String) Absolute path to source file. Returns: (String or None): Command line for Clang invocation using |filename| as a source. Returns None if no such command line could be found. """ build_targets = GetNinjaBuildOutputsForSourceFile(out_dir, filename) for build_target in build_targets: command_line = GetClangCommandLineForNinjaOutput(out_dir, build_target) if command_line: return command_line return None def GetClangOptionsFromCommandLine(clang_commandline, out_dir, additional_flags): """Extracts relevant command line options from |clang_commandline| Args: clang_commandline: (String) Full Clang invocation. out_dir: (String) Absolute path to ninja build directory. Relative paths in the command line are relative to |out_dir|. additional_flags: (List of String) Additional flags to return. Returns: (List of Strings) The list of command line flags for this source file. Can be empty. """ clang_flags = [] + additional_flags # Parse flags that are important for YCM's purposes. 
clang_tokens = shlex.split(clang_commandline) for flag_index, flag in enumerate(clang_tokens): if flag.startswith('-I'): # Relative paths need to be resolved, because they're relative to the # output dir, not the source. if flag[2] == '/': clang_flags.append(flag) else: abs_path = os.path.normpath(os.path.join(out_dir, flag[2:])) clang_flags.append('-I' + abs_path) elif flag.startswith('-std'): clang_flags.append(flag) elif flag.startswith('-') and flag[1] in 'DWFfmO': if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard': # These flags causes libclang (3.3) to crash. Remove it until things # are fixed. continue clang_flags.append(flag) elif flag == '-isysroot': # On Mac -isysroot <path> is used to find the system headers. # Copy over both flags. if flag_index + 1 < len(clang_tokens): clang_flags.append(flag) clang_flags.append(clang_tokens[flag_index + 1]) elif flag.startswith('--sysroot='): # On Linux we use a sysroot image. sysroot_path = flag.lstrip('--sysroot=') if sysroot_path.startswith('/'): clang_flags.append(flag) else: abs_path = os.path.normpath(os.path.join(out_dir, sysroot_path)) clang_flags.append('--sysroot=' + abs_path) return clang_flags def GetClangOptionsFromNinjaForFilename(chrome_root, filename): """Returns the Clang command line options needed for building |filename|. Command line options are based on the command used by ninja for building |filename|. If |filename| is a .h file, uses its companion .cc or .cpp file. If a suitable companion file can't be located or if ninja doesn't know about |filename|, then uses default source files in Blink and Chromium for determining the commandline. Args: chrome_root: (String) Path to src/. filename: (String) Absolute path to source file being edited. Returns: (List of Strings) The list of command line flags for this source file. Can be empty. 
""" if not chrome_root: return [] # Generally, everyone benefits from including Chromium's src/, because all of # Chromium's includes are relative to that. additional_flags = ['-I' + os.path.join(chrome_root)] # Version of Clang used to compile Chromium can be newer then version of # libclang that YCM uses for completion. So it's possible that YCM's libclang # doesn't know about some used warning options, which causes compilation # warnings (and errors, because of '-Werror'); additional_flags.append('-Wno-unknown-warning-option') sys.path.append(os.path.join(chrome_root, 'tools', 'vim')) from ninja_output import GetNinjaOutputDirectory out_dir = os.path.realpath(GetNinjaOutputDirectory(chrome_root)) clang_line = GetClangCommandLineFromNinjaForSource( out_dir, GetBuildableSourceFile(chrome_root, filename)) if not clang_line: # If ninja didn't know about filename or it's companion files, then try a # default build target. It is possible that the file is new, or build.ninja # is stale. clang_line = GetClangCommandLineFromNinjaForSource( out_dir, GetDefaultSourceFile(chrome_root, filename)) if not clang_line: return additional_flags return GetClangOptionsFromCommandLine(clang_line, out_dir, additional_flags) def FlagsForFile(filename): """This is the main entry point for YCM. Its interface is fixed. Args: filename: (String) Path to source file being edited. Returns: (Dictionary) 'flags': (List of Strings) Command line flags. 'do_cache': (Boolean) True if the result should be cached. """ abs_filename = os.path.abspath(filename) chrome_root = FindChromeSrcFromFilename(abs_filename) clang_flags = GetClangOptionsFromNinjaForFilename(chrome_root, abs_filename) # If clang_flags could not be determined, then assume that was due to a # transient failure. Preventing YCM from caching the flags allows us to try to # determine the flags again. 
should_cache_flags_for_file = bool(clang_flags) final_flags = _default_flags + clang_flags return { 'flags': final_flags, 'do_cache': should_cache_flags_for_file }
danakj/chromium
tools/vim/chromium.ycm_extra_conf.py
Python
bsd-3-clause
12,768
[ "VisIt" ]
cc340a532248c82993b990ba3d6d198550462e1c49aebe66563a830c211f9f9a
""" The ansible action handles generating a custom inventory file and running the supplied playbook on the stack instances. This also includes cloning the playbook down from a remote url if supplied instead of a path and installing the requirements from a requirements.yml file if one is found in the playbook directory. The ansible action takes a 1. playbook name 2. a path or url for the playbook location 3. a vault_pass or a vault_key_file if your playbook has a vault in it. 4. tags or skip_tags to modify what tasks in the playbook run. 5. extra_vars for variables that need to be passed in and 6. opt_flags to add any extra flags to the ansible command. 7. name of the stack to run on. TODO: - Provide an option to supply a dict for mapping instance tags to groups in the inventory file that are used in the playbook. - Tests. - Provide a sample playbook repo with a requirements.yml file """ import os import anyconfig import jsonschema import envoy import shutil import logging from git.repo.base import Repo from within.shell import working_directory from tempfile import mkdtemp from shepherd.stack import Stack from shepherd.common.plugins import Action logger = logging.getLogger(__name__) class Ansible(Action): def __init__(self): super(Ansible, self).__init__() self._working_dir = None self.path = None self.playbook = None self.inventory = None def validate(self, **kwargs): """ Validates the kwargs with the schema file. Args: kwargs (dict): a dictionary of settings. """ logger.debug('Validating settings...') path = os.path.dirname(os.path.realpath(__file__)) schema_file = os.path.join(path, 'ansible.schema') assert os.path.isfile(schema_file) schema = anyconfig.load(schema_file, 'json') jsonschema.validate(kwargs, schema) def run(self, config, **kwargs): """ Run the ansible playbook on all hosts in the stack. Args: kwargs (dict): a dictionary of settings. 
Notes: * Default naming for the inventory files use:: [tag_stack_name_{stack_name}] {all instance ips} [tag_local_name_{local_name}] {instance ip} ... """ self._working_dir = mkdtemp(prefix=__name__) stack = Stack.restore(kwargs['name'], config) try: self.path = '{}/playbook'.format(self._working_dir) self.playbook = os.path.join(self.path, kwargs["playbook"]) self.inventory = '{}/inventory'.format(self._working_dir) self.validate(**kwargs) self.install_playbook(**kwargs) # After the playbook has been installed ensure that the playbook # in the working directory exists. if not os.path.isfile(self.playbook): raise ValueError('Playbook %s is not a file', self.playbook) self.install_requirements() self.build_inventory(stack) if 'vault_pass' in kwargs or 'vault_key_file' in kwargs: self.passfile = '{}/vaultpass'.format(self._working_dir) logger.debug('Setting up vault password file...') if 'vault_pass' in kwargs: with open(self.passfile, 'w') as handle: handle.write(kwargs['vault_pass']) elif 'vault_key_file' in kwargs: assert os.path.isfile(kwargs['vault_key_file']) os.symlink( os.path.realpath(kwargs['vault_key_file']), self.passfile ) # There is a slight security issue where if the vault_pass file # is moved between the closing of the file above and it getting # opened by ansible in the cmd below the password file won't get # cleaned up. cmd = 'ansible-playbook -i {} {}'.format(self.inventory, self.playbook) if 'vault_pass' in kwargs or 'vault_key_file' in kwargs: cmd = ('{} --vault-password-file={}'.format(cmd, self.passfile)) if 'extra_vars' in kwargs: cmd = '{} --extra-vars \"{}\"'.format(cmd, kwargs['extra_vars']) if 'tags' in kwargs: cmd = '{} --tags={}'.format(cmd, kwargs['tags']) if 'skip_tags' in kwargs: cmd = '{} --skip_tags={}'.format(cmd, kwargs['skip_tags']) if 'opt_flags' in kwargs: # Should probably do proper validation on these, but # I don't think it should be used very often. 
cmd = '{} {}'.format(cmd, kwargs['opt_flags']) # Log envoy output result = envoy.run(cmd) logger.debug(result.std_out) logger.warn(result.std_err) finally: logger.debug('Deleting working directory %s', self._working_dir) shutil.rmtree(self._working_dir) return stack def install_playbook(self, **kwargs): """ Validates that self._playbook is a valid path or url. If it is a url git clone to /tmp. If it has a requires file install dependencies. """ logger.debug('Installing playbook...') if "url" in kwargs: # Should probably extract the playbook name from the playbook # URL Repo.clone_from( kwargs["url"], self.path ) elif "path" in kwargs: if os.path.exists(kwargs["path"]): path = None if os.path.isfile(kwargs["path"]): path = os.path.dirname(kwargs["path"]) else: path = kwargs["path"] os.symlink(os.path.realpath(path), self.path,) else: raise ValueError( 'The path value provided (%s) does not exists', kwargs['path'] ) assert os.path.exists(self.path) def install_requirements(self): logger.debug('Installing requirements...') requirements_path = os.path.join(self.path, "requirements.yml") if os.path.exists(requirements_path): with working_directory(os.path.dirname(self.playbook)): envoy.run('ansible-galaxy install -r requirements.yml') def build_inventory(self, stack): logger.debug('Buidling the inventory file...') instances = stack.get_resource_by_type('Instance') common_name = 'tag_stack_name_{}'.format(stack.global_name) inventory_dict = { common_name: [], } for instance in instances: inventory_dict[common_name].append(instance.ip) if instance.local_name in inventory_dict: inventory_dict[instance.local_name].append(instance.ip) else: inventory_dict[instance.local_name] = [instance.ip] with open(self.inventory, 'w') as handle: content = '' for key, ips in inventory_dict.items(): content += '[tag_local_name_{}]\n'.format(key) for ip in ips: content += '{}\n'.format(ip) logger.debug('Inventory content:\n %s', content) handle.write(content)
invenia/shepherd
shepherd/actions/ansible.py
Python
mpl-2.0
7,481
[ "Galaxy" ]
cbdf55b666cb21ccbc33df7ee59335720a8c8ae58138d1a1467e38cad19000a7
# -*- coding: utf-8 -*- # # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2003-2005 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Rewritten in 2008 for 3.x version by Łukasz Rymarczyk # Written in 2007 by Piotr Czubaszek, largely based on rel_de.py by Alex Roitman. # PL: Po objaśnienia oznaczania relacji zobacz relationship.py # EN: For more information see relationship.py # """ Polish-specific definitions of relationships. 
""" #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from gramps.gen.lib import Person import gramps.gen.relationship #------------------------------------------------------------------------- # określa liczebnik porządkowy _level_name = [ "pierwszego", "drugiego", "trzeciego", "czwartego", "piątego", "szóstego", "siódmego", "ósmego", "dziewiątego", "dziesiątego", "jedenastego", "dwunastego","trzynastego", "czternastego", "piętnastego", "szesnastego", "siedemnastego", "osiemnastego","dziewiętnastego", "dwudziestego", ] _father_level = [ "", "ojciec", "dziadek", "pradziadek", "prapradziadek", "praprapradziadek", "prapraprapradziadek", "praprapraprapradziadek", "prapraprapraprapradziadek", "praprapraprapraprapradziadek", "prapraprapraprapraprapradziadek", ] _mother_level = [ "", "matka", "babcia", "prababcia", "praprababcia", "prapraprababcia", "praprapraprababcia", "prapraprapraprababcia", "praprapraprapraprababcia", "prapraprapraprapraprababcia", "praprapraprapraprapraprababcia", ] _son_level = [ "", "syn", "wnuk", "prawnuk", "praprawnuk", "prapraprauwnuk", "praprapraprauwnuk", "prapraprapraprawnuk", "praprapraprapraprawnuk", "prapraprapraprapraprawnuk", "praprapraprapraprapraprawnuk", ] _daughter_level = [ "", "córka", "wnuczka", "prawnuczka", "praprawnuczka", "prapraprauwnuczka", "praprapraprauwnuczka", "prapraprapraprawnuczka", "praprapraprapraprawnuczka", "prapraprapraprapraprawnuczka", "praprapraprapraprapraprawnuczka", ] _sister_level_of_male = [ "", "siostra", "ciotka stryjeczna", "babcia stryjeczna", "prababcia stryjeczna", "praprababcia stryjeczna", "prapraprababcia stryjeczna", "praprapraprababcia stryjeczna", "prapraprapraprababcia stryjeczna", "praprapraprapraprababcia stryjeczna", "prapraprapraprapraprababcia stryjeczna", "praprapraprapraprapraprababcia stryjeczna", ] _sister_level_of_female = [ "", "siostra", "ciotka", "babcia 
cioteczna", "prababcia cioteczna", "praprababcia cioteczna", "prapraprababcia cioteczna", "praprapraprababcia cioteczna", "prapraprapraprababcia cioteczna", "praprapraprapraprababcia cioteczna", "prapraprapraprapraprababcia cioteczna", "praprapraprapraprapraprababcia cioteczna", ] _brother_level_of_male = [ "", "brat", "stryj", "dziadek stryjeczny", "pradziadek stryjeczny", "prapradziadek stryjeczny", "praprapradziadek stryjeczny", "prapraprapradziadek stryjeczny", "praprapraprapradziadek stryjeczny", "prapraprapraprapradziadek stryjeczny", "praprapraprapraprapradziadek stryjeczny", "prapraprapraprapraprapradziadek stryjeczny", ] _brother_level_of_female = [ "", "brat", "wuj", "dziadek cioteczny", "pradziadek cioteczny", "prapradziadek cioteczny", "praprapradziadek cioteczny", "prapraprapradziadek cioteczny", "praprapraprapradziadek cioteczny", "prapraprapraprapradziadek cioteczny", "praprapraprapraprapradziadek cioteczny", "prapraprapraprapraprapradziadek cioteczny", ] _nephew_level_of_brothers_son = [ "", "bratanek", "syn bratanka", "wnuk bratanka", "prawnuk bratanka", "praprawnuk bratanka", "prapraprawnuk bratanka", "praprapraprawnuk bratanka", "prapraprapraprawnuk bratanka", "praprapraprapraprawnuk bratanka", "prapraprapraprapraprawnuk bratanka", ] _nephew_level_of_brothers_daughter = [ "", "bratanica", "syn bratanicy", "wnuk bratanicy", "prawnuk bratanicy", "praprawnuk bratanicy", "prapraprawnuk bratanicy", "praprapraprawnuk bratanicy", "prapraprapraprawnuk bratanicy", "praprapraprapraprawnuk bratanicy", "prapraprapraprapraprawnuk bratanicy", "praprapraprapraprapraprawnuk bratanicy", ] _nephew_level_of_sisters_son = [ "", "siostrzeniec", "syn siostrzeńca", "wnuk siostrzeńca", "prawnuk siostrzeńca", "praprawnuk siostrzeńca", "prapraprawnuk siostrzeńca", "praprapraprawnuk siostrzeńca", "prapraprapraprawnuk siostrzeńca", "praprapraprapraprawnuk siostrzeńca", "prapraprapraprapraprawnuk siostrzeńca", ] _nephew_level_of_sisters_daughter = [ "", "siostrzenica", "syn 
siostrzenicy", "wnuk siostrzenicy", "prawnuk siostrzenicy", "praprawnuk siostrzenicy", "prapraprawnuk siostrzenicy", "praprapraprawnuk siostrzenicy", "prapraprapraprawnuk siostrzenicy", "praprapraprapraprawnuk siostrzenicy", "prapraprapraprapraprawnuk siostrzenicy", ] _niece_level_of_brothers_son = [ "", "bratanica", "córka bratanka", "wnuczka bratanka", "prawnuczka bratanka", "praprawnuczka bratanka", "prapraprawnuczka bratanka", "praprapraprawnuczka bratanka", "prapraprapraprawnuczka bratanka", "praprapraprapraprawnuczka bratanka", ] _niece_level_of_brothers_daughter = [ "", "bratanica", "córka bratanicy", "wnuczka bratanicy", "prawnuczka bratanicy", "praprawnuczka bratanicy", "prapraprawnuczka bratanicy", "praprapraprawnuczka bratanicy", "prapraprapraprawnuczka bratanicy", "praprapraprapraprawnuczka bratanicy", ] _niece_level_of_sisters_son = [ "", "siostrzenica", "córka siostrzeńca", "wnuczka siostrzeńca", "prawnuczka siostrzeńca", "praprawnuczka siostrzeńca", "prapraprawnuczka siostrzeńca", "praprapraprawnuczka siostrzeńca", "prapraprapraprawnuczka siostrzeńca", "praprapraprapraprawnuczka siostrzeńca", ] _niece_level_of_sisters_daughter = [ "", "siostrzenica", "córka siostrzenicy", "wnuczka siostrzenicy", "prawnuczka siostrzenicy", "praprawnuczka siostrzenicy", "prapraprawnuczka siostrzenicy", "praprapraprawnuczka siostrzenicy", "prapraprapraprawnuczka siostrzenicy", "praprapraprapraprawnuczka siostrzenicy", ] #------------------------------------------------------------------------- # # # #------------------------------------------------------------------------- class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator): """ RelationshipCalculator Class """ def __init__(self): gramps.gen.relationship.RelationshipCalculator.__init__(self) def get_son(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo potomek męski (np. 
syn) jest spokrewniony do danej osoby """ # Określ, czy osoba jest przybraną, czy rodzoną if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " # TODO: dodać rozpoznawanie pasierb/pasierbica if level >= 0 and level < len(_son_level): return t_inlaw +_son_level[level] elif level >= len(_son_level) and (level - 1) < len(_level_name): return t_inlaw + \ "potomek męski %s pokolenia" % _level_name[level - 1] else: return t_inlaw + \ "potomek męski w %d pokoleniu" % level def get_daughter(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo potomek żeński (np. córka) jest spokrewniony do danej osoby """ # Określ, czy osoba jest przybraną, czy rodzoną # + stwórz obie formy (męską i żeńską) if inlaw == '': t_inlaw = "" t_inlawM = "" else: t_inlaw = "przybrana " t_inlawM = "przybrany " # TODO: dodać rozpoznawanie pasierb/pasierbica if level >= 0 and level < len(_daughter_level): return t_inlaw + _daughter_level[level] elif level >= len(_daughter_level) and (level - 1) < len(_level_name): return t_inlawM + \ "potomek żeński %s pokolenia" % _level_name[level - 1] else: return t_inlawM + \ "potomek żeński w %d pokoleniu" % level def get_child_unknown(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo potomek o nieokreślonej płci jest spokrewniony dodanej osoby """ # Określ, czy osoba jest przybraną, czy rodzoną if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " if level == 1: if inlaw == '' : return "dziecko" else: return "przybrane dziecko" elif level >= 1 and (level - 1) < len(_level_name): return t_inlaw + "potomek %s pokolenia" % _level_name[level - 1] else: return t_inlaw + "potomek w %d pokoleniu" % level def get_sword_distaff(self, level, reltocommon, spacebefore = ""): """ PL: Generuje relację po mieczu/po kądzieli EN: Generate relation 'by sword' or 'by distaff', polish specific """ if level <= 1: return "" elif level == 2: # dziadek/babcia if reltocommon[0] == self.REL_FATHER: # ze strony rodzonego ojca 
return spacebefore + "po mieczu" elif reltocommon[0] == self.REL_MOTHER: # ze strony rodzonej matki return spacebefore + "po kądzieli" else: # relacja inna niż rodzona return "" elif level == 3: # pradziadek/prababcia if (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_FATHER): # pradziadek od dziadka ze strony ojca return spacebefore + "podwójnego miecza" elif (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_MOTHER): # pradziadek od babci ze strony ojca return spacebefore + "raz po mieczu, dalej po kądzieli" elif (reltocommon[0] == self.REL_MOTHER) \ & (reltocommon[1] == self.REL_FATHER): # pradziadek od dziadka ze strony matki return spacebefore + "raz po kądzieli, dalej po mieczu" elif (reltocommon[0] == self.REL_MOTHER) \ & (reltocommon[1] == self.REL_MOTHER): # pradziadek od babci ze strony matki return spacebefore + "podwójnej kądzieli" else: # relacja inna niż rodzona return "" elif level == 4: # prapradziadek/praprababcia if (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_FATHER) \ & (reltocommon[2] == self.REL_FATHER): # tzw. linia męska return spacebefore + "potrójnego miecza" if (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_FATHER) \ & (reltocommon[2] == self.REL_FATHER): # tzw. linia żeńska return spacebefore + "potrójnego miecza" else: return "" else: return "" def get_father(self, level, reltocommon, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo przodek męski (np. 
ojciec) jest spokrewniony do danej osoby """ if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " if level >= 0 and level < len(_father_level): # Jeśli znasz bezpośrednią nazwę relacji, to ją zastosuj if level == 1: # ojciec return t_inlaw + _father_level[level] elif (level >= 2) & (level <= 4): # dziadek, pradziadek, prapradziadek return t_inlaw + _father_level[level] \ + self.get_sword_distaff(level, reltocommon, ' ') else: return t_inlaw + _father_level[level] elif level >= len(_father_level) and (level - 1) < len(_level_name): # jeśli istnieje liczebnik dla danej liczby return t_inlaw + \ "przodek męski %s pokolenia" % (_level_name[level - 1]) else: # dla pozostałych przypadków wypisz relację liczbowo return t_inlaw + \ "przodek męski w %d pokoleniu" % level def get_mother(self, level, reltocommon, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo przodek żeński (np. matka) jest spokrewniony do danej osoby """ if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrana " if level >= 0 and level < len(_mother_level): # Jeśli znasz bezpośrednią nazwę relacji, to ją zastosuj if level == 1: # matka return t_inlaw + _mother_level[level] elif (level >= 2) & (level <= 4): # babcia, prababcia, praprababcia return t_inlaw + _mother_level[level] \ + self.get_sword_distaff(level, reltocommon, ' ') else: return t_inlaw + _mother_level[level] elif level >= len(_mother_level) and (level - 1) < len(_level_name): # jeśli istnieje liczebnik dla danej liczby return t_inlaw + \ "przodek żeński %s pokolenia" % (_level_name[level - 1]) else: # dla pozostałych przypadków wypisz relację liczbowo return t_inlaw +"przodek żeński w %d pokoleniu" % level def get_parent_unknown(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo przodek o nieokreślonej płci jest spokrewniony dodanej osoby """ if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " if level == 1: return t_inlaw + "rodzic" elif level > 1 and (level - 1) < len(_level_name): if (level 
>= 2) & (level <= 4): # babcia, prababcia, praprababcia # (albo dziadek, pradziadek, prapradziadek) tmp = t_inlaw +\ "przodek %s pokolenia" % (_level_name[level - 1]) # TODO: try to recognize a gender... return tmp # + self.get_sword_distaff(level, reltocommon, ' ') else: return t_inlaw + \ "przodek %s pokolenia" % (_level_name[level - 1]) else: return t_inlaw +"przodek w %d pokoleniu" % level def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b, reltocommon_a, reltocommon_b, only_birth=True, in_law_a=False, in_law_b=False): """ Provide a string that describes the relationsip between a person, and another person. E.g. "grandparent" or "child". """ if only_birth: step = '' else: step = self.STEP if in_law_a or in_law_b : inlaw = self.INLAW else: inlaw = '' # b is the same person as a if Ga == Gb == 0: rel_str = 'ta sama osoba' elif Ga == 0: # b is son/descendant of a if gender_b == Person.MALE: if inlaw and Gb == 1 and not step: rel_str = "zięć" else: rel_str = self.get_son(Gb, inlaw) elif gender_b == Person.FEMALE: if inlaw and Gb == 1 and not step: rel_str = "synowa" else: rel_str = self.get_daughter(Gb, inlaw) else: rel_str = self.get_child_unknown(Gb, inlaw) elif Gb == 0: # b is parent/grand parent of a if gender_b == Person.MALE: if inlaw and Gb == 1 and not step: # TODO: znaleźć odpowiedniki w zależności czy to syn/córka rel_str = "teść" else: rel_str = self.get_father(Ga, reltocommon_a, inlaw) elif gender_b == Person.FEMALE: if inlaw and Gb == 1 and not step: # TODO: znaleźć odpowiedniki w zależności czy to syn/córka rel_str = "teściowa" else: rel_str = self.get_mother(Ga, reltocommon_a, inlaw) else: rel_str = self.get_parent_unknown(Ga, inlaw) elif Ga == Gb == 1: # rodzeństwo if gender_b == Person.MALE: if inlaw and not step: rel_str = "brat przyrodni" else: rel_str = "brat rodzony" elif gender_b == Person.FEMALE: if inlaw and not step: rel_str = "siostra przyrodnia" else: rel_str = "siostra rodzony" else: rel_str = "brat/siostra" elif Gb == 1 
and Ga > 1: # Przyjmij, że nie rozróżniamy osób prawnie i nieprawnie przybranych... if Ga == 2: # rodzeństwo rodziców # brat ojca, czyli stryj if (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_FATHER): rel_str = "stryj" # siostra ojca, czyli ciotka ??? elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_FATHER): rel_str = "ciotka (tzw. stryjna)" # brat matki, czyli wuj/wujek elif (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_MOTHER): rel_str = "wuj (wujek)" # siostra matki, czyli ciotka elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_MOTHER): rel_str = "ciotka" else: rel_str = "brat lub siostra rodzica" elif Ga == 3: # rodzeństwo dziadków rodziców osoby sprawdzanej # rodzeństwo dziadka po mieczu (ojca ojca) if (reltocommon_a[0] == self.REL_FATHER) \ & (reltocommon_a[1] == self.REL_FATHER): if (gender_b == Person.MALE): rel_str = "dziadek stryjeczny (tzw przestryj, stary stryj)" elif (gender_b == Person.FEMALE): rel_str = "babcia stryjeczna" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" # rodzeństwo babki po mieczu (matki ojca) elif (reltocommon_a[0] == self.REL_FATHER) \ & (reltocommon_a[1] == self.REL_MOTHER): # TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma # dokładniejszych określeń dla tego typu relacji # TODO: EN: Try to check, whether in old polish language # are more specific word for this kind of relation if (gender_b == Person.MALE): rel_str = "dziadek stryjeczny (tzw przestryj, stary stryj)" elif (gender_b == Person.FEMALE): rel_str = "babcia stryjeczna" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" # rodzeństwo dziadka po kądzieli (ojca matki) elif (reltocommon_a[0] == self.REL_MOTHER) \ & (reltocommon_a[1] == self.REL_FATHER): # TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma # dokładniejszych określeń dla tego typu relacji # TODO: EN: Try to check, whether in old polish language # are more specific word for this kind of relation if (gender_b == Person.MALE): rel_str = 
"dziadek cioteczny (starop. prapociot)" elif (gender_b == Person.FEMALE): rel_str = "babcia cioteczna (starop. praciota)" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" # rodzeństwo babki po kądzieli (matki matki) elif (reltocommon_a[0] == self.REL_MOTHER) \ & (reltocommon_a[1] == self.REL_MOTHER): # TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma # dokładniejszych określeń dla tego typu relacji # TODO: EN: Try to check, whether in old polish language # are more specific word for this kind of relation if (gender_b == Person.MALE): rel_str = "dziadek cioteczny (starop. prapociot)" elif (gender_b == Person.FEMALE): rel_str = "babcia cioteczna (starop. praciota)" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" else: if (gender_b == Person.MALE): rel_str = "rodzeństwo dziadka" elif (gender_b == Person.FEMALE): rel_str = "rodzeństwo babci" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" elif Ga > 3: # pradziadkowie... (grandparents) if (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_FATHER): if Ga >= 0 and Ga < len(_brother_level_of_male): rel_str = _brother_level_of_male[Ga] else: rel_str = "rodzeństwo przodka męskiego %d pokolenia" % Ga elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_FATHER): if Ga >= 0 and Ga < len(_sister_level_of_male): rel_str = _sister_level_of_male[Ga] else: rel_str = "rodzeństwo przodka żeńskiego %d pokolenia" % Ga elif (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_MOTHER): if Ga >= 0 and Ga < len(_brother_level_of_female): rel_str = _brother_level_of_male[Ga] else: rel_str = "rodzeństwo przodka męskiego %d pokolenia" % Ga elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_MOTHER): if Ga >= 0 and Ga < len(_sister_level_of_female): rel_str = _sister_level_of_male[Ga] else: rel_str = "rodzeństwo przodka żeńskiego %d pokolenia" % Ga else: rel_str = "rodzeństwo przodka %d pokolenia" % Ga else: # A program should never goes there, but... 
rel_str = "Relacja nie określona" elif Ga ==1 and Gb > 1: # syn brata if (gender_b == Person.MALE) \ & (reltocommon_b[0] == self.REL_FATHER): if Gb < len(_nephew_level_of_brothers_son): rel_str = _nephew_level_of_brothers_son[Gb] else: rel_str = "męski potomek w %d pokoleniu brata" % Gb # córka brata elif (gender_b == Person.FEMALE) \ & (reltocommon_b[0] == self.REL_FATHER): if Gb < len(_nephew_level_of_brothers_daughter): rel_str = _nephew_level_of_brothers_daughter[Gb] else: rel_str = "żeński potomek w %d pokoleniu brata" % Gb # syn siostry if (gender_b == Person.MALE) \ & (reltocommon_b[0] == self.REL_MOTHER): if Gb < len(_nephew_level_of_sisters_son): rel_str = _nephew_level_of_sisters_son[Gb] else: rel_str = "męski potomek w %d pokoleniu brata" % Gb # córka siostry elif (gender_b == Person.FEMALE) \ & (reltocommon_b[0] == self.REL_MOTHER): if Gb < len(_nephew_level_of_sisters_daughter): rel_str = _nephew_level_of_sisters_daughter[Gb] else: rel_str = "żeński potomek w %d pokoleniu brata" % Gb # potomek brata elif (reltocommon_b[0] == self.REL_FATHER): rel_str = "potomek w %d pokoleniu brata" % Gb # potomek brata elif (reltocommon_b[0] == self.REL_MOTHER): rel_str = "potomek w %d pokoleniu brata" % Gb else : rel_str = "potomek w %d pokoleniu rodzeństwa" % Gb elif Ga > 1 and Gb > 1: if (gender_b == Person.MALE): if Ga == 2 and Gb == 2: rel_str = "kuzyn" else: rel_str = "daleki kuzyn (%d. stopień pokrewieństwa)" % (Ga+Gb) elif (gender_b == Person.FEMALE): if Ga == 2 and Gb == 2: rel_str = "kuzynka" else: rel_str = "daleka kuzynka (%d. stopień pokrewieństwa)" % (Ga+Gb) else: if Ga == 2 and Gb == 2: rel_str = "kuzyn(ka)" else: rel_str = "daleki członek rodziny (%d. stopień pokrewieństwa)" % (Ga+Gb) else: # A program should never goes there, but... 
rel_str = "nieokreślony stopień pokrewieństwa" return rel_str def get_plural_relationship_string(self, Ga, Gb, reltocommon_a='', reltocommon_b='', only_birth=True, in_law_a=False, in_law_b=False): """ Generate a text with information, how far away is a group of persons from a main person """ if Ga == Gb == 0: return 'ta sama osoba' if 0 == Ga: if 1 == Gb: return 'Dzieci' if 2 == Gb: return 'Wnuki' if 3 == Gb: return 'Prawnuki' if 4 == Gb: return 'Praprawnuki' return 'Praprapra(n)wnuki' if 0 == Gb: if 1 == Ga: return 'Rodzice' if 2 == Ga: return 'Dziadkowie' if 3 == Ga: return 'Pradziadkowie' if 4 == Ga: return 'Praprapradziadkowie' return 'Praprapra(n)dziadkowie' if 1 == Ga == Gb: return 'Rodzeństwo' if 1 == Gb and Ga > 1: return 'Wujowie/stryjowie i ciocie' if 1 < Gb and 1 == Ga: return 'bratankowie(ice)/siostrzeńcy(nice)' if 1 < Ga and 1 < Gb: return 'dalsza rodzina' return 'relacja nieznana' def get_sibling_relationship_string(self, sib_type, gender_a, gender_b, in_law_a=False, in_law_b=False): if in_law_a or in_law_b : inlaw = self.INLAW else: inlaw = '' if sib_type == self.NORM_SIB: if not inlaw: if gender_b == Person.MALE: rel_str = 'brat (rodzony)' elif gender_b == Person.FEMALE: rel_str = 'siostra (rodzona)' else: rel_str = 'brat lub siostra (rodzeni)' else: if gender_b == Person.MALE: # TODO: znaleźć odpowiednik rel_str = "brat (pasierb)" elif gender_b == Person.FEMALE: # TODO: znaleźć odpowiednik rel_str = "siostra (pasierbica)" else: # TODO: znaleźć odpowiednik rel_str = "brat lub siostra (pasierb/pasierbica)" elif sib_type == self.UNKNOWN_SIB: if not inlaw: if gender_b == Person.MALE: rel_str = 'brat' elif gender_b == Person.FEMALE: rel_str = 'siostra' else: rel_str = 'brat lub siostra' else: if gender_b == Person.MALE: # TODO: znaleźć odpowiednik rel_str = "brat (brat/szwagier)" elif gender_b == Person.FEMALE: # TODO: znaleźć odpowiednik rel_str = "siostra (bratowa/szwagierka)" else: # TODO: znaleźć odpowiednik rel_str = "brat lub siostra 
(szwagier/szagierka)" elif sib_type == self.HALF_SIB_FATHER: if gender_b == Person.MALE: rel_str = "brat przyrodni" elif gender_b == Person.FEMALE: rel_str = "siostra przyrodnia" else: rel_str = "brat/siostra przyrodni" elif sib_type == self.HALF_SIB_MOTHER: if gender_b == Person.MALE: rel_str = "brat przyrodni" elif gender_b == Person.FEMALE: rel_str = "siostra przyrodnia" else: rel_str = "brat/siostra przyrodni" elif sib_type == self.STEP_SIB: if gender_b == Person.MALE: rel_str = "brat przyrodni" elif gender_b == Person.FEMALE: rel_str = "siostra przyrodnia" else: rel_str = "brat lub siostra przyrodnia" else: rel_str = "nieokreślona relacja rodzeństwa" return rel_str if __name__ == "__main__": # Test function. Call it as follows from the command line (so as to find # imported modules): # export PYTHONPATH=/path/to/gramps/src # python src/plugins/rel/rel_pl.py """TRANSLATORS, copy this if statement at the bottom of your rel_xx.py module, and test your work with: python src/plugins/rel/rel_xx.py """ from gramps.gen.relationship import test RC = RelationshipCalculator() test(RC, True)
SNoiraud/gramps
gramps/plugins/rel/rel_pl.py
Python
gpl-2.0
32,097
[ "Brian" ]
6126e26e1f50d4e1441e273158e90137e6b86c000e6b55df02b677d6bb07e2dc
''' Steps for problem.feature lettuce tests ''' # pylint: disable=C0111 # pylint: disable=W0621 from lettuce import world, step from lettuce.django import django_url from common import i_am_registered_for_the_course, TEST_SECTION_NAME from problems_setup import PROBLEM_DICT, answer_problem, problem_has_answer, add_problem_to_course from nose.tools import assert_equal @step(u'I am viewing a "([^"]*)" problem with "([^"]*)" attempt') def view_problem_with_attempts(step, problem_type, attempts): i_am_registered_for_the_course(step, 'model_course') # Ensure that the course has this problem type add_problem_to_course('model_course', problem_type, {'attempts': attempts}) # Go to the one section in the factory-created course # which should be loaded with the correct problem chapter_name = TEST_SECTION_NAME.replace(" ", "_") section_name = chapter_name url = django_url('/courses/edx/model_course/Test_Course/courseware/%s/%s' % (chapter_name, section_name)) world.browser.visit(url) @step(u'I am viewing a "([^"]*)" that shows the answer "([^"]*)"') def view_problem_with_show_answer(step, problem_type, answer): i_am_registered_for_the_course(step, 'model_course') # Ensure that the course has this problem type add_problem_to_course('model_course', problem_type, {'showanswer': answer}) # Go to the one section in the factory-created course # which should be loaded with the correct problem chapter_name = TEST_SECTION_NAME.replace(" ", "_") section_name = chapter_name url = django_url('/courses/edx/model_course/Test_Course/courseware/%s/%s' % (chapter_name, section_name)) world.browser.visit(url) @step(u'I am viewing a "([^"]*)" problem') def view_problem(step, problem_type): i_am_registered_for_the_course(step, 'model_course') # Ensure that the course has this problem type add_problem_to_course('model_course', problem_type) # Go to the one section in the factory-created course # which should be loaded with the correct problem chapter_name = TEST_SECTION_NAME.replace(" ", "_") 
section_name = chapter_name url = django_url('/courses/edx/model_course/Test_Course/courseware/%s/%s' % (chapter_name, section_name)) world.browser.visit(url) @step(u'External graders respond "([^"]*)"') def set_external_grader_response(step, correctness): assert(correctness in ['correct', 'incorrect']) response_dict = {'correct': True if correctness == 'correct' else False, 'score': 1 if correctness == 'correct' else 0, 'msg': 'Your problem was graded %s' % correctness} # Set the fake xqueue server to always respond # correct/incorrect when asked to grade a problem world.xqueue_server.set_grade_response(response_dict) @step(u'I answer a "([^"]*)" problem "([^"]*)ly"') def answer_problem_step(step, problem_type, correctness): """ Mark a given problem type correct or incorrect, then submit it. *problem_type* is a string representing the type of problem (e.g. 'drop down') *correctness* is in ['correct', 'incorrect'] """ assert(correctness in ['correct', 'incorrect']) assert(problem_type in PROBLEM_DICT) answer_problem(problem_type, correctness) # Submit the problem check_problem(step) @step(u'I check a problem') def check_problem(step): world.css_click("input.check") @step(u'The "([^"]*)" problem displays a "([^"]*)" answer') def assert_problem_has_answer(step, problem_type, answer_class): ''' Assert that the problem is displaying a particular answer. 
These correspond to the same correct/incorrect answers we set in answer_problem() We can also check that a problem has been left blank by setting answer_class='blank' ''' assert answer_class in ['correct', 'incorrect', 'blank'] assert problem_type in PROBLEM_DICT problem_has_answer(problem_type, answer_class) @step(u'I reset the problem') def reset_problem(_step): world.css_click('input.reset') @step(u'I press the button with the label "([^"]*)"$') def press_the_button_with_label(_step, buttonname): button_css = 'button span.show-label' elem = world.css_find(button_css).first assert_equal(elem.text, buttonname) world.css_click(button_css) @step(u'The "([^"]*)" button does( not)? appear') def action_button_present(_step, buttonname, doesnt_appear): button_css = 'section.action input[value*="%s"]' % buttonname if doesnt_appear: assert world.is_css_not_present(button_css) else: assert world.is_css_present(button_css) @step(u'the button with the label "([^"]*)" does( not)? appear') def button_with_label_present(_step, buttonname, doesnt_appear): if doesnt_appear: assert world.browser.is_text_not_present(buttonname, wait_time=5) else: assert world.browser.is_text_present(buttonname, wait_time=5) @step(u'My "([^"]*)" answer is marked "([^"]*)"') def assert_answer_mark(step, problem_type, correctness): """ Assert that the expected answer mark is visible for a given problem type. *problem_type* is a string identifying the type of problem (e.g. 'drop down') *correctness* is in ['correct', 'incorrect', 'unanswered'] """ # Determine which selector(s) to look for based on correctness assert(correctness in ['correct', 'incorrect', 'unanswered']) assert(problem_type in PROBLEM_DICT) # At least one of the correct selectors should be present for sel in PROBLEM_DICT[problem_type][correctness]: has_expected = world.is_css_present(sel) # As soon as we find the selector, break out of the loop if has_expected: break # Expect that we found the expected selector assert(has_expected)
abhinavp13/IITBX-edx-platform-dev
lms/djangoapps/courseware/features/problems.py
Python
agpl-3.0
5,877
[ "VisIt" ]
c274430d38d660297ba39fb58ad9033469444fb98ce1f65b43c5c6773fe61b2f
s = ''' ===== gback ===== This package provides an simpler interface to Google calendars using the google-api-python-client python package and OAuth. Examples -------- Note: See the "Setting up the project on Google" section below before trying the examples. List calendar names:: >>> from gback import GCalSession >>> session = GCalSession('~/gback.oauth') >>> >>> for c in session.names: print c Add an appointment to a named calendar:: >>> from gback import GCalSession >>> # print the output will display the appiointment url. >>> # 'Marc' is the name of the calendar used. >>> session = GCalSession('~/gback.oauth') >>> des='Write a simpler way to use the google calendar api.' >>> print session['Marc'].add('Write Python code.', '20150430', des=des) Create an ical file for all the appointments in the named calendar:: >>> from gback import GCalSession >>> cal_name = 'Marc' >>> session = GCalSession('~/gback.oauth') >>> with open(cal_name + '.ical'), 'w') as fh: >>> fh.write(session[cal_name].events) Setting up the project on Google -------------------------------- Visit: https://console.developers.google.com/project/ Choose "Create Project" Enter a project name. This can be anything. I used "gback" for google backup. Read the agreements and agree to them if you wish to continue. Wait while Activities windows works on setting up your project. Select "APIs & auth" on the left pane to expand menu items. select "Credentials" select "Create new Client ID" select "Installed application" Answer consent screen information prompt. Select your email address and enter your project name in the "Product Name" field. I entered "gback". Click on "Save" If prompted to create another client id. Click on "Cancel". You have to repeat the following steps. But this time "gback" should be shown in the drop down box at the top of the web page. select "Create new Client ID" select "Installed application" select "Other" for the installed application type. 
Now you should have a "Client ID for native application shown". Select "Download JSON" That will save a JSON file with a client_id and client_secret among other things. It will have a long file name but you can rename it to anything you like, "gback.json", say. Enable APIs: Under "APIs & auth" select "APIs" Select "Google+ API" Then enable it. Under "APIs & auth" select "APIs" Select "Calendar API" Then enable it. The first time you run this program it will launch your browser to log into your Google account. It will get a key and save it to your named OAuth file using the --oauthfn arg. After that it will read your OAuth file to get the key. $ python gback.py -l --clientfn gback.json --oauthfn gback.oauth After logging in using on your browser click on 'Accept' when prompted that gcalcback would ike to "Manage your calendars". From now on the gback.py program should work using the gback.json and gback.oauth files without need for a browser. Note: Running this program requested permission which needed validation using a browser which did not work with w3m because it doesn't support Javascript. I had to run it the first time on ny in xwindows so it could launch chrome for validation. It stored keys etc using Storage() to a file so that it doesn't need to revalidate again. LICENSE ======= The MIT License (MIT) Copyright (c) 2015 The Brookhaven Group, LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' def readme(): return s
Schwarzschild/gback
gback/readme.py
Python
mit
4,544
[ "VisIt" ]
2222be0026d9b8a8ea7435ba3d77198b4ce22d7f6ce095339c496ced0b97192f
import signal from contextlib import ExitStack from functools import lru_cache from pathlib import Path from shutil import rmtree from subprocess import Popen from tempfile import NamedTemporaryFile import os from typing import Mapping, Tuple from pandas import DataFrame from rpy2.robjects import r, NULL as null, StrVector, IntVector from rpy2.robjects.packages import importr from tqdm import tqdm from analyses import active_driver from analyses.active_driver import ActiveDriverResult from helpers.cytoscape import CytoscapeCommand, Cytoscape, get from models import InterproDomain output_dir = Path('analyses_output/active_pathway/') sets_path = Path('data/gene_sets/') @lru_cache() def gmt_from_domains(path=sets_path / 'domains.gmt', include_sub_types=True): """Export sets of genes having the same domains into a GMT file""" with open(path, 'w') as f: query = InterproDomain.query for domain_type in tqdm(query, total=query.count()): # collect all occurrences occurrences = [] occurrences.extend(domain_type.occurrences) if include_sub_types: sub_types = domain_type.children[:] while sub_types: sub_type = sub_types.pop() occurrences.extend(sub_type.occurrences) sub_types.extend(sub_type.children) line = [ domain_type.accession, domain_type.description, *{domain.protein.gene_name for domain in occurrences} ] f.write('\t'.join(line) + '\n') return path GENE_SETS = { # GMT files downloaded from Broad Institute: # these files has to be manually downloaded from # http://software.broadinstitute.org/gsea/msigdb/collections.jsp 'hallmarks': sets_path / 'h.all.v6.1.symbols.gmt', 'all_canonical_pathways': sets_path / 'c2.cp.reactome.v6.1.symbols.gmt', 'gene_ontology': sets_path / 'c5.all.v6.1.symbols.gmt', 'oncogenic': sets_path / 'c6.all.v6.1.symbols.gmt', 'immunologic': sets_path / 'c7.all.v6.1.symbols.gmt', # other gene sets 'human_pathways': 'data/hsapiens.pathways.NAME.gmt', 'drug_targets': sets_path / 'Human_DrugBank_all_symbol.gmt', 'domains': gmt_from_domains } def 
run_active_pathways( ad_result: ActiveDriverResult, gene_sets_gmt_path: str, cytoscape_dir: Path = None, **kwargs ) -> DataFrame: active_pathways = importr('activeDriverPW') df = ad_result['all_gene_based_fdr'] df = df.set_index('gene')['p'] scores = r['as.matrix'](df) cytoscape_paths = StrVector([ str(cytoscape_dir / name) for name in ['terms.txt', 'groups.txt', 'abridged.gmt'] ]) if cytoscape_dir else null return active_pathways.activeDriverPW(scores, gene_sets_gmt_path, cytoscape_filenames=cytoscape_paths, **kwargs) class EnrichmentMap(CytoscapeCommand): """API for initial processing and loading of enrichment maps in Cytoscape""" build = get(defaults={'analysisType': 'generic'}, plain_text=True) class DeadlyPopen(Popen): """Same as Popen, but commits suicide on exit""" def __init__(self, *args, kill_descendants=True, **kwargs): self.kill_descendants = kill_descendants if kill_descendants: kwargs['preexec_fn'] = os.setsid super().__init__(*args, **kwargs) def __exit__(self, *args, **kwargs): if self.kill_descendants: os.killpg(self.pid, signal.SIGKILL) else: self.terminate() self.kill() def run_all( site_type: str, gene_sets: Mapping[str, Path] = GENE_SETS, gene_set_filter: Tuple[int, int] = (5, 1000), correct=False, **kwargs ): """Runs all active_pathways combinations for given site_type. Uses pan_cancer/clinvar Active Driver analyses results and all provided GMT gene sets. Args: site_type: site filter which will be passed to ActiveDriver analysis gene_sets: gene sets to be considered gene_set_filter: a two-tuple: (min, max) number of genes required to be in a gene set. If not set, the default of (5, 1000) is used Results are saved in `output_dir`. 
Returns: Mapping of directories with newly computed ActivePathways results """ data_table = importr('data.table') paths = {} kwargs['geneset.filter'] = IntVector(gene_set_filter) for analysis in [active_driver.pan_cancer_analysis, active_driver.clinvar_analysis]: for gene_set in gene_sets: path = output_dir / analysis.name / gene_set / site_type # remove the old results (if any) rmtree(path, ignore_errors=True) # recreate dir path.mkdir(parents=True) path = path.absolute() ad_result = analysis(site_type) print(f'Preparing active pathways: {analysis.name} for {len(ad_result["all_gene_based_fdr"])} genes') print(f'Gene sets/background: {gene_set}') gene_sets_path = gene_sets[gene_set] if callable(gene_sets_path): gene_sets_path = gene_sets_path() result = run_active_pathways(ad_result, str(gene_sets_path), cytoscape_dir=path, correct=correct, **kwargs) data_table.fwrite(result, str(path / 'pathways.tsv'), sep='\t', sep2=r.c('', ',', '')) paths[(analysis, gene_set)] = path return paths def generate_enrichment_maps(paths, cytoscape_path): """Generate Cytoscape `cytoscape_path` should point to a dir with 'cytoscape.sh' script in it. 
""" with ExitStack() as stack: cytoscape_server_path = Path(cytoscape_path) / 'cytoscape.sh' # start cytoscape REST API server in a context manager # (so it will be close automatically upon exit or error) port = 1234 cytoscape_out = stack.enter_context(NamedTemporaryFile()) print('Starting Cytoscape...') print(f'All the output will be saved in {cytoscape_out.name}') stack.enter_context( DeadlyPopen( [cytoscape_server_path, '-R', str(port)], kill_descendants=True, stdout=cytoscape_out ) ) cytoscape = Cytoscape() enrichment_app = EnrichmentMap(port=port) for (analysis, gene_set), path in paths.items(): enrichment_path = str(path / 'terms.txt') gmt_path = path / 'abridged.gmt' if gmt_path.exists(): response = enrichment_app.build(gmtFile=str(gmt_path), enrichmentsDataset1=enrichment_path) print('Cytoscape Enrichment Map:', response) cytoscape.session.save_as(data={'file': str(path / 'enrichment_map.cys')}) cytoscape.session.new() else: print(f'No gmt file for {analysis.name} & {gene_set} - no cytoscape network will be created')
reimandlab/ActiveDriverDB
website/analyses/gene_sets.py
Python
lgpl-2.1
6,995
[ "Cytoscape" ]
a524628ed528ad348345ca5a7e65aad1f9c2e56731f390d83bd94a2434524d36
"Yang/Wu's OEP implementation, in PyQuante." from math import sqrt import settings from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\ array,solve from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK from PyQuante.LA2 import geigh,mkdens,trace2,simx from PyQuante.hartree_fock import get_fock from PyQuante.CGBF import three_center from PyQuante.optimize import fminBFGS from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\ get_entropy,mkdens_fermi import logging logger = logging.getLogger("pyquante") gradcall=0 class EXXSolver: "EXXSolver(solver)" def __init__(self,solver): # Solver is a pointer to a HF or a DFT calculation that has # already converged self.solver = solver self.bfs = self.solver.bfs self.nbf = len(self.bfs) self.S = self.solver.S self.h = self.solver.h self.Ints = self.solver.Ints self.molecule = self.solver.molecule self.nel = self.molecule.get_nel() self.nclosed, self.nopen = self.molecule.get_closedopen() self.Enuke = self.molecule.get_enuke() self.norb = self.nbf self.orbs = self.solver.orbs self.orbe = self.solver.orbe self.Gij = [] for g in xrange(self.nbf): gmat = zeros((self.nbf,self.nbf),'d') self.Gij.append(gmat) gbf = self.bfs[g] for i in xrange(self.nbf): ibf = self.bfs[i] for j in xrange(i+1): jbf = self.bfs[j] gij = three_center(ibf,gbf,jbf) gmat[i,j] = gij gmat[j,i] = gij D0 = mkdens(self.orbs,0,self.nclosed) J0 = getJ(self.Ints,D0) Vfa = (2.0*(self.nel-1.0)/self.nel)*J0 self.H0 = self.h + Vfa self.b = zeros(self.nbf,'d') return def iterate(self,**kwargs): self.iter = 0 self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature) logging.debug("iter Energy <b|b>") logging.debug("---- ------ -----") self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging) return def get_energy(self,b): self.iter += 1 self.Hoep = get_Hoep(b,self.H0,self.Gij) self.orbe,self.orbs = geigh(self.Hoep,self.S) if self.etemp: self.D,self.entropy = 
mkdens_fermi(self.nel,self.orbe,self.orbs, self.etemp) else: self.D = mkdens(self.orbs,0,self.nclosed) self.entropy=0 self.F = get_fock(self.D,self.Ints,self.h) self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy if self.iter == 1 or self.iter % 10 == 0: logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b))) return self.energy def get_gradient(self,b): energy = self.get_energy(b) Fmo = simx(self.F,self.orbs) bp = zeros(self.nbf,'d') for g in xrange(self.nbf): # Transform Gij[g] to MOs. This is done over the whole # space rather than just the parts we need. I can speed # this up later by only forming the i,a elements required Gmo = simx(self.Gij[g],self.orbs) # Now sum the appropriate terms to get the b gradient for i in xrange(self.nclosed): for a in xrange(self.nclosed,self.norb): bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a]) #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp)))) return bp class UEXXSolver: "EXXSolver(solver)" def __init__(self,solver): # Solver is a pointer to a UHF calculation that has # already converged self.solver = solver self.bfs = self.solver.bfs self.nbf = len(self.bfs) self.S = self.solver.S self.h = self.solver.h self.Ints = self.solver.Ints self.molecule = self.solver.molecule self.nel = self.molecule.get_nel() self.nalpha, self.nbeta = self.molecule.get_alphabeta() self.Enuke = self.molecule.get_enuke() self.norb = self.nbf self.orbsa = self.solver.orbsa self.orbsb = self.solver.orbsb self.orbea = self.solver.orbea self.orbeb = self.solver.orbeb self.Gij = [] for g in xrange(self.nbf): gmat = zeros((self.nbf,self.nbf),'d') self.Gij.append(gmat) gbf = self.bfs[g] for i in xrange(self.nbf): ibf = self.bfs[i] for j in xrange(i+1): jbf = self.bfs[j] gij = three_center(ibf,gbf,jbf) gmat[i,j] = gij gmat[j,i] = gij D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta) J0 = getJ(self.Ints,D0) Vfa = ((self.nel-1.)/self.nel)*J0 self.H0 = self.h + Vfa self.b = zeros(2*self.nbf,'d') 
return def iterate(self,**kwargs): self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature) self.iter = 0 logging.debug("iter Energy <b|b>") logging.debug("---- ------ -----") self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging) return def get_energy(self,b): self.iter += 1 ba = b[:self.nbf] bb = b[self.nbf:] self.Hoepa = get_Hoep(ba,self.H0,self.Gij) self.Hoepb = get_Hoep(bb,self.H0,self.Gij) self.orbea,self.orbsa = geigh(self.Hoepa,self.S) self.orbeb,self.orbsb = geigh(self.Hoepb,self.S) if self.etemp: self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa, self.etemp) self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb, self.etemp) self.entropy = 0.5*(entropya+entropyb) else: self.Da = mkdens(self.orbsa,0,self.nalpha) self.Db = mkdens(self.orbsb,0,self.nbeta) self.entropy=0 J = getJ(self.Ints,self.Da+self.Db) Ka = getK(self.Ints,self.Da) Kb = getK(self.Ints,self.Db) self.Fa = self.h + J - Ka self.Fb = self.h + J - Kb self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) + trace2(self.h+self.Fb,self.Db))\ + self.Enuke + self.entropy if self.iter == 1 or self.iter % 10 == 0: logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b))) return self.energy def get_gradient(self,b): energy = self.get_energy(b) Fmoa = simx(self.Fa,self.orbsa) Fmob = simx(self.Fb,self.orbsb) bp = zeros(2*self.nbf,'d') for g in xrange(self.nbf): # Transform Gij[g] to MOs. This is done over the whole # space rather than just the parts we need. I can speed # this up later by only forming the i,a elements required Gmo = simx(self.Gij[g],self.orbsa) # Now sum the appropriate terms to get the b gradient for i in xrange(self.nalpha): for a in xrange(self.nalpha,self.norb): bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a]) for g in xrange(self.nbf): # Transform Gij[g] to MOs. This is done over the whole # space rather than just the parts we need. 
I can speed # this up later by only forming the i,a elements required Gmo = simx(self.Gij[g],self.orbsb) # Now sum the appropriate terms to get the b gradient for i in xrange(self.nbeta): for a in xrange(self.nbeta,self.norb): bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a]) #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp)))) return bp def exx(atoms,orbs,**kwargs): return oep_hf(atoms,orbs,**kwargs) def oep_hf(atoms,orbs,**kwargs): """oep_hf - Form the optimized effective potential for HF exchange. See notes on options and other args in oep routine. """ return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**kwargs) def oep(atoms,orbs,energy_func,grad_func=None,**kwargs): """oep - Form the optimized effective potential for a given energy expression oep(atoms,orbs,energy_func,grad_func=None,**kwargs) atoms A Molecule object containing a list of the atoms orbs A matrix of guess orbitals energy_func The function that returns the energy for the given method grad_func The function that returns the force for the given method Options ------- verbose False Output terse information to stdout (default) True Print out additional information ETemp False Use ETemp value for finite temperature DFT (default) float Use (float) for the electron temperature bfs None The basis functions to use. 
List of CGBF's basis_data None The basis data to use to construct bfs integrals None The one- and two-electron integrals to use If not None, S,h,Ints """ verbose = kwargs.get('verbose') ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature) opt_method = kwargs.get('opt_method',settings.OEPOptMethod) bfs = getbasis(atoms,**kwargs) # The basis set for the potential can be set different from # that used for the wave function pbfs = kwargs.get('pbfs') if not pbfs: pbfs = bfs npbf = len(pbfs) S,h,Ints = getints(bfs,atoms,**kwargs) nel = atoms.get_nel() nocc,nopen = atoms.get_closedopen() Enuke = atoms.get_enuke() # Form the OEP using Yang/Wu, PRL 89 143002 (2002) nbf = len(bfs) norb = nbf bp = zeros(nbf,'d') bvec = kwargs.get('bvec') if bvec: assert len(bvec) == npbf b = array(bvec) else: b = zeros(npbf,'d') # Form and store all of the three-center integrals # we're going to need. # These are <ibf|gbf|jbf> (where 'bf' indicates basis func, # as opposed to MO) # N^3 storage -- obviously you don't want to do this for # very large systems Gij = [] for g in xrange(npbf): gmat = zeros((nbf,nbf),'d') Gij.append(gmat) gbf = pbfs[g] for i in xrange(nbf): ibf = bfs[i] for j in xrange(i+1): jbf = bfs[j] gij = three_center(ibf,gbf,jbf) gmat[i,j] = gij gmat[j,i] = gij # Compute the Fermi-Amaldi potential based on the LDA density. # We're going to form this matrix from the Coulombic matrix that # arises from the input orbitals. 
D0 and J0 refer to the density # matrix and corresponding Coulomb matrix D0 = mkdens(orbs,0,nocc) J0 = getJ(Ints,D0) Vfa = (2*(nel-1.)/nel)*J0 H0 = h + Vfa b = fminBFGS(energy_func,b,grad_func, (nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij), logger=logging) energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke, S,h,Ints,H0,Gij,return_flag=1) return energy,orbe,orbs def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs): """Computes the energy for the OEP/HF functional Options: return_flag 0 Just return the energy 1 Return energy, orbe, orbs 2 Return energy, orbe, orbs, F """ return_flag = kwargs.get('return_flag') Hoep = get_Hoep(b,H0,Gij) orbe,orbs = geigh(Hoep,S) if ETemp: efermi = get_efermi(nel,orbe,ETemp) occs = get_fermi_occs(efermi,orbe,ETemp) D = mkdens_occs(orbs,occs) entropy = get_entropy(occs,ETemp) else: D = mkdens(orbs,0,nocc) F = get_fock(D,Ints,h) energy = trace2(h+F,D)+Enuke if ETemp: energy += entropy iref = nel/2 gap = 627.51*(orbe[iref]-orbe[iref-1]) logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f" % (energy,sqrt(dot(b,b)),gap)) #logging.debug("%s" % orbe) if return_flag == 1: return energy,orbe,orbs elif return_flag == 2: return energy,orbe,orbs,F return energy def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs): """Computes the gradient for the OEP/HF functional. return_flag 0 Just return gradient 1 Return energy,gradient 2 Return energy,gradient,orbe,orbs """ # Dump the gradient every 10 steps so we can restart... global gradcall gradcall += 1 #if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b) # Form the new potential and the new orbitals energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke, S,h,Ints,H0,Gij,return_flag=2) Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs)) norb = nbf bp = zeros(nbf,'d') # dE/db for g in xrange(nbf): # Transform Gij[g] to MOs. This is done over the whole # space rather than just the parts we need. 
I can speed # this up later by only forming the i,a elements required Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs)) # Now sum the appropriate terms to get the b gradient for i in xrange(nocc): for a in xrange(nocc,norb): bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a]) #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp)))) return_flag = kwargs.get('return_flag') if return_flag == 1: return energy,bp elif return_flag == 2: return energy,bp,orbe,orbs return bp def get_Hoep(b,H0,Gij): Hoep = H0 # Add the contributions from the gaussian potential functions # H[ij] += b[g]*<ibf|g|jbf> for g in xrange(len(b)): Hoep = Hoep + b[g]*Gij[g] return Hoep # Here's a much faster way to do this. Haven't figured out how to # do it for more generic functions like OEP-GVB def oep_hf_an(atoms,orbs,**kwargs): """oep_hf - Form the optimized effective potential for HF exchange. Implementation of Wu and Yang's Approximate Newton Scheme from J. Theor. Comp. Chem. 2, 627 (2003). oep_hf(atoms,orbs,**kwargs) atoms A Molecule object containing a list of the atoms orbs A matrix of guess orbitals Options ------- bfs None The basis functions to use for the wfn pbfs None The basis functions to use for the pot basis_data None The basis data to use to construct bfs integrals None The one- and two-electron integrals to use If not None, S,h,Ints """ maxiter = kwargs.get('maxiter',settings.OEPIters) tol = kwargs.get('tol',settings.OEPTolerance) bfs = getbasis(atoms,**kwargs) # The basis set for the potential can be set different from # that used for the wave function pbfs = kwargs.get('pbfs') if not pbfs: pbfs = bfs npbf = len(pbfs) S,h,Ints = getints(bfs,atoms) nel = atoms.get_nel() nocc,nopen = atoms.get_closedopen() Enuke = atoms.get_enuke() # Form the OEP using Yang/Wu, PRL 89 143002 (2002) nbf = len(bfs) norb = nbf bp = zeros(nbf,'d') bvec = kwargs.get('bvec') if bvec: assert len(bvec) == npbf b = array(bvec) else: b = zeros(npbf,'d') # Form and store all of the 
three-center integrals # we're going to need. # These are <ibf|gbf|jbf> (where 'bf' indicates basis func, # as opposed to MO) # N^3 storage -- obviously you don't want to do this for # very large systems Gij = [] for g in xrange(npbf): gmat = zeros((nbf,nbf),'d') Gij.append(gmat) gbf = pbfs[g] for i in xrange(nbf): ibf = bfs[i] for j in xrange(i+1): jbf = bfs[j] gij = three_center(ibf,gbf,jbf) gmat[i,j] = gij gmat[j,i] = gij # Compute the Fermi-Amaldi potential based on the LDA density. # We're going to form this matrix from the Coulombic matrix that # arises from the input orbitals. D0 and J0 refer to the density # matrix and corresponding Coulomb matrix D0 = mkdens(orbs,0,nocc) J0 = getJ(Ints,D0) Vfa = (2*(nel-1.)/nel)*J0 H0 = h + Vfa b = zeros(nbf,'d') eold = 0 for iter in xrange(maxiter): Hoep = get_Hoep(b,H0,Gij) orbe,orbs = geigh(Hoep,S) D = mkdens(orbs,0,nocc) Vhf = get2JmK(Ints,D) energy = trace2(2*h+Vhf,D)+Enuke if abs(energy-eold) < tol: break else: eold = energy logging.debug("OEP AN Opt: %d %f" % (iter,energy)) dV_ao = Vhf-Vfa dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs)) X = zeros((nbf,nbf),'d') c = zeros(nbf,'d') Gkt = zeros((nbf,nbf),'d') for k in xrange(nbf): # This didn't work; in fact, it made things worse: Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs)) for i in xrange(nocc): for a in xrange(nocc,norb): c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a]) for l in xrange(nbf): Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs)) for i in xrange(nocc): for a in xrange(nocc,norb): X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a]) # This should actually be a pseudoinverse... b = solve(X,c) logger.info("Final OEP energy = %f" % energy) return energy,orbe,orbs def oep_uhf_an(atoms,orbsa,orbsb,**kwargs): """oep_hf - Form the optimized effective potential for HF exchange. Implementation of Wu and Yang's Approximate Newton Scheme from J. Theor. Comp. Chem. 2, 627 (2003). 
oep_uhf(atoms,orbs,**kwargs) atoms A Molecule object containing a list of the atoms orbs A matrix of guess orbitals Options ------- bfs None The basis functions to use for the wfn pbfs None The basis functions to use for the pot basis_data None The basis data to use to construct bfs integrals None The one- and two-electron integrals to use If not None, S,h,Ints """ maxiter = kwargs.get('maxiter',settings.OEPIters) tol = kwargs.get('tol',settings.OEPTolerance) ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature) bfs = getbasis(atoms,**kwargs) # The basis set for the potential can be set different from # that used for the wave function pbfs = kwargs.get('pbfs') if not pbfs: pbfs = bfs npbf = len(pbfs) S,h,Ints = getints(bfs,atoms,**kwargs) nel = atoms.get_nel() nclosed,nopen = atoms.get_closedopen() nalpha,nbeta = nclosed+nopen,nclosed Enuke = atoms.get_enuke() # Form the OEP using Yang/Wu, PRL 89 143002 (2002) nbf = len(bfs) norb = nbf ba = zeros(npbf,'d') bb = zeros(npbf,'d') # Form and store all of the three-center integrals # we're going to need. # These are <ibf|gbf|jbf> (where 'bf' indicates basis func, # as opposed to MO) # N^3 storage -- obviously you don't want to do this for # very large systems Gij = [] for g in xrange(npbf): gmat = zeros((nbf,nbf),'d') Gij.append(gmat) gbf = pbfs[g] for i in xrange(nbf): ibf = bfs[i] for j in xrange(i+1): jbf = bfs[j] gij = three_center(ibf,gbf,jbf) gmat[i,j] = gij gmat[j,i] = gij # Compute the Fermi-Amaldi potential based on the LDA density. # We're going to form this matrix from the Coulombic matrix that # arises from the input orbitals. 
D0 and J0 refer to the density # matrix and corresponding Coulomb matrix D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta) J0 = getJ(Ints,D0) Vfa = ((nel-1.)/nel)*J0 H0 = h + Vfa eold = 0 for iter in xrange(maxiter): Hoepa = get_Hoep(ba,H0,Gij) Hoepb = get_Hoep(ba,H0,Gij) orbea,orbsa = geigh(Hoepa,S) orbeb,orbsb = geigh(Hoepb,S) if ETemp: efermia = get_efermi(2*nalpha,orbea,ETemp) occsa = get_fermi_occs(efermia,orbea,ETemp) Da = mkdens_occs(orbsa,occsa) efermib = get_efermi(2*nbeta,orbeb,ETemp) occsb = get_fermi_occs(efermib,orbeb,ETemp) Db = mkdens_occs(orbsb,occsb) entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp)) else: Da = mkdens(orbsa,0,nalpha) Db = mkdens(orbsb,0,nbeta) J = getJ(Ints,Da) + getJ(Ints,Db) Ka = getK(Ints,Da) Kb = getK(Ints,Db) energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\ +Enuke if ETemp: energy += entropy if abs(energy-eold) < tol: break else: eold = energy logging.debug("OEP AN Opt: %d %f" % (iter,energy)) # Do alpha and beta separately # Alphas dV_ao = J-Ka-Vfa dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa))) X = zeros((nbf,nbf),'d') c = zeros(nbf,'d') for k in xrange(nbf): Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k], transpose(orbsa))) for i in xrange(nalpha): for a in xrange(nalpha,norb): c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a]) for l in xrange(nbf): Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l], transpose(orbsa))) for i in xrange(nalpha): for a in xrange(nalpha,norb): X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a]) # This should actually be a pseudoinverse... 
ba = solve(X,c) # Betas dV_ao = J-Kb-Vfa dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb))) X = zeros((nbf,nbf),'d') c = zeros(nbf,'d') for k in xrange(nbf): Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k], transpose(orbsb))) for i in xrange(nbeta): for a in xrange(nbeta,norb): c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a]) for l in xrange(nbf): Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l], transpose(orbsb))) for i in xrange(nbeta): for a in xrange(nbeta,norb): X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a]) # This should actually be a pseudoinverse... bb = solve(X,c) logger.info("Final OEP energy = %f" % energy) return energy,(orbea,orbeb),(orbsa,orbsb) def test_old(): from PyQuante.Molecule import Molecule from PyQuante.Ints import getbasis,getints from PyQuante.hartree_fock import rhf logging.basicConfig(level=logging.DEBUG,format="%(message)s") #mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))], # units='Angstrom') mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr') bfs = getbasis(mol) S,h,Ints = getints(bfs,mol) print "after integrals" E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True) print "RHF energy = ",E_hf E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints)) return def test(): from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver logging.basicConfig(level=logging.DEBUG,format="%(message)s") mol = Molecule("He",[(2,(0,0,0))]) solver = HFSolver(mol) solver.iterate() print "HF energy = ",solver.energy dft_solver = DFTSolver(mol) dft_solver.iterate() print "DFT energy = ",dft_solver.energy oep = EXXSolver(solver) # Testing 0 temp oep.iterate() # Testing finite temp oep.iterate(etemp=40000) return def utest(): from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver logging.basicConfig(level=logging.DEBUG,format="%(message)s") mol = Molecule("He",[(2,(0,0,0))]) mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2) solver = UHFSolver(mol) solver.iterate() print "HF 
energy = ",solver.energy dft_solver = DFTSolver(mol) dft_solver.iterate() print "DFT energy = ",dft_solver.energy oep = UEXXSolver(solver) # Testing 0 temp oep.iterate() # Testing finite temp oep.iterate(etemp=10000) return if __name__ == '__main__': test() utest()
berquist/PyQuante
PyQuante/OEP.py
Python
bsd-3-clause
25,427
[ "Gaussian" ]
4f798bafe39e1af47c1912f458204f3567a219816356b4639ebb8bc0d5b39828
# -*- coding: utf-8 -*- # # Google Genomics documentation build configuration file, created by # sphinx-quickstart on Wed Apr 30 15:58:16 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import sphinx.environment from docutils.utils import get_source_line # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Google Genomics' copyright = u'2015, Google Inc' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = 'v1' # The full version, including alpha/beta/rc tags. release = 'v1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', 'includes/*'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # THIS IS THE GOOGLE GENOMICS FOOTER rst_epilog = """ ------------------------------------------------------------------------------------ .. container:: ggfooter Have feedback or corrections? All improvements to these docs are welcome! You can click on the "Edit on GitHub" link at the top right corner of this page or `file an issue <https://github.com/googlegenomics/start-here/issues>`_. Need more help? Please see https://cloud.google.com/genomics/support. .. GLOBAL LINK REPLACEMENTS CAN GO HERE .. ### Data links .. _Personal Genome Project: http://www.personalgenomes.org/ .. _PGP: http://www.personalgenomes.org/ .. _ClinVar: http://www.ncbi.nlm.nih.gov/clinvar/ .. _UCSC Sequence and Annotation Data: http://hgdownload.cse.ucsc.edu/ .. _Cancer Genomics Cloud: http://isb-cgc.org .. 
_Illumina Platinum Genomes project data: http://googlegenomics.readthedocs.org/en/latest/use_cases/discover_public_data/platinum_genomes.html .. _1000 Genomes project data: http://googlegenomics.readthedocs.org/en/latest/use_cases/discover_public_data/1000_genomes.html .. ### Gene links .. _BRCA1: http://ghr.nlm.nih.gov/gene/BRCA1 .. _BRCA2: http://ghr.nlm.nih.gov/gene/BRCA2 .. ### GA4GH Links .. _GA4GH: http://ga4gh.org/#/api .. _Global Alliance for Genomics and Health API: http://ga4gh.org/#/api .. _Global Alliance for Genomics and Health Beacon: http://ga4gh.org/#/beacon .. ### VCF documentation links .. _VCF specification: https://samtools.github.io/hts-specs/VCFv4.3.pdf .. _gVCF: https://sites.google.com/site/gvcftools/home/about-gvcf .. _genome VCF: https://sites.google.com/site/gvcftools/home/about-gvcf .. _gVCF conventions: https://sites.google.com/site/gvcftools/home/about-gvcf/gvcf-conventions .. ### Corporate ecosystem links .. _Complete Genomics: http://www.completegenomics.com .. _Illumina: https://www.illumina.com .. ### Google genomics organizational links .. _googlegenomics github organization: https://github.com/googlegenomics .. _Contact us: google-genomics-contact@googlegroups.com .. ### Google Guide Links .. _Application Default Credentials: https://developers.google.com/identity/protocols/application-default-credentials .. ### Google Product Links .. _Google BigQuery: https://cloud.google.com/bigquery/ .. _Google Cloud Dataflow: https://cloud.google.com/dataflow/ .. _Google Cloud Storage: https://cloud.google.com/storage/ .. _Google Compute Engine: https://cloud.google.com/compute/ .. _Google Cloud Platform Console: https://console.cloud.google.com/ .. _Cloud Console: https://console.cloud.google.com/ .. _Google Cloud Shell: https://cloud.google.com/shell/docs/ .. _Cloud Shell: https://cloud.google.com/shell/docs/ .. _Google Genomics: https://cloud.google.com/genomics/ .. _Google Cloud Datalab: https://cloud.google.com/datalab/ .. 
_Google Cloud Dataproc: https://cloud.google.com/dataproc/ .. ### Deep links into the Developers Console .. _click-to-deploy NCBI BLAST: https://console.cloud.google.com/project/_/launcher/details/click-to-deploy-images/ncbiblast .. _click-to-deploy Bioconductor: https://console.cloud.google.com/project/_/mc/template/bioconductor .. _Deployments: https://console.cloud.google.com/project/_/deployments .. ### Deep links into cloud.google.com documentation .. _Compute Engine resource quota: https://cloud.google.com/compute/docs/resource-quotas .. _Compute Engine quota request form: https://docs.google.com/a/google.com/forms/d/1vb2MkAr9JcHrp6myQ3oTxCyBv2c7Iyc5wqIKqE3K4IE/viewform .. _Compute Engine Preemptible Virtual Machines: https://cloud.google.com/preemptible-vms/ .. _Google BigQuery query reference: https://cloud.google.com/bigquery/query-reference .. _Google BigQuery user-defined functions: https://cloud.google.com/bigquery/user-defined-functions .. _Dataflow Quickstart: https://cloud.google.com/dataflow/docs/quickstarts/quickstart-java-maven .. _Genomics Quickstart: https://cloud.google.com/genomics/quickstart .. _cloud.google.com/genomics: https://cloud.google.com/genomics .. _What is Google Genomics: https://cloud.google.com/genomics/what-is-google-genomics .. _Google Genomics fundamentals: https://cloud.google.com/genomics/what-is-google-genomics#fundamentals .. _authentication instructions: https://cloud.google.com/genomics/install-genomics-tools#authenticate .. _Google Genomics Pricing: https://cloud.google.com/genomics/pricing .. _Google Genomics Tools: https://cloud.google.com/genomics/install-genomics-tools .. _Google Genomics API: https://cloud.google.com/genomics/reference/rest/ .. _Google Genomics Reads API: https://cloud.google.com/genomics/reference/rest/v1/reads .. _Google Genomics Variants API: https://cloud.google.com/genomics/reference/rest/v1/variants .. 
_Google Genomics Pipelines API: https://cloud.google.com/genomics/reference/rest/v1alpha2/pipelines .. _Google APIs Explorer: https://developers.google.com/apis-explorer/#p/genomics/v1/ .. _Variant Import merge logic details: https://cloud.google.com/genomics/v1/managing-variants#imports .. _VariantSet: https://cloud.google.com/genomics/reference/rest/v1/variantsets .. _Load Genomic Variants: https://cloud.google.com/genomics/v1/load-variants .. _Understanding the BigQuery Variants Table Schema: https://cloud.google.com/genomics/v1/bigquery-variants-schema .. _Verily DeepVariant: https://cloud.google.com/genomics/v1alpha2/deepvariant .. _Using Google Cloud Storage with Big Data: https://cloud.google.com/storage/docs/working-with-big-data .. _gsutil: https://cloud.google.com/storage/docs/gsutil .. _install gcloud: https://cloud.google.com/sdk/ .. _gcloud: https://cloud.google.com/sdk/ .. _persistent disk: https://cloud.google.com/compute/docs/tutorials/compute-engine-disks-price-performance-and-persistence .. _selecting the right persistent disk: https://cloud.google.com/compute/docs/tutorials/compute-engine-disks-price-performance-and-persistence#selecting_the_right_disk .. ### Open ecosystem links .. _ALPN: http://www.eclipse.org/jetty/documentation/9.2.10.v20150310/alpn-chapter.html .. _Annovar: http://annovar.openbioinformatics.org/en/latest/ .. _Apache Spark: https://spark.apache.org/ .. _Apache Hadoop: https://hadoop.apache.org/ .. _Docker: https://www.docker.com/ .. _Elasticluster: https://elasticluster.readthedocs.org .. _Elasticluster repo: https://github.com/gc3-uzh-ch/elasticluster .. _Galaxy: https://galaxyproject.org/ .. _Grid Engine: http://gridengine.info/ .. _Java 8: http://www.oracle.com/technetwork/java/javase/downloads/index.html .. _NCBI BLAST: http://blast.ncbi.nlm.nih.gov/Blast.cgi .. _NCBI BLAST Cloud Documentation: http://ncbi.github.io/blast-cloud/ .. _R: http://www.r-project.org/ .. _S3IT: http://www.s3it.uzh.ch/ .. 
_Slurm: https://slurm.schedmd.com/ .. _Bioconductor: http://www.bioconductor.org/ .. _Using Bioconductor: http://www.bioconductor.org/install/ .. _Dockerized Bioconductor: http://bioconductor.org/help/docker/ .. _GoogleGenomics Bioconductor package: http://bioconductor.org/packages/release/bioc/html/GoogleGenomics.html .. _IGV: https://www.broadinstitute.org/igv/ .. _Picard: http://broadinstitute.github.io/picard/ .. _GATK: https://www.broadinstitute.org/gatk/ .. _HTSJDK: https://github.com/samtools/htsjdk/ .. _gridengine array job: http://wiki.gridengine.info/wiki/index.php/Simple-Job-Array-Howto .. ### googlegenomics github links .. _gatk-tools-java: https://github.com/googlegenomics/gatk-tools-java .. _Data Analysis using Google Genomics: https://github.com/googlegenomics/codelabs/tree/master/R/1000Genomes-BRCA1-analysis .. _Quality Control using Google Genomics: https://github.com/googlegenomics/codelabs/tree/master/R/PlatinumGenomes-QC .. _BiocDockerOnGCE launch script: https://raw.githubusercontent.com/googlegenomics/gce-images/master/launch-scripts/bioconductorRStudioGCE.sh .. _Grid Computing Tools github repo: https://github.com/googlegenomics/grid-computing-tools .. _getting-started-bigquery: https://github.com/googlegenomics/getting-started-bigquery .. _bigquery-examples: https://github.com/googlegenomics/bigquery-examples .. _pipelines-api-examples: https://github.com/googlegenomics/pipelines-api-examples .. ### R package links .. _VariantAnnotation: http://bioconductor.org/packages/release/bioc/html/VariantAnnotation.html .. _ggbio: http://bioconductor.org/packages/release/bioc/html/ggbio.html .. _ggplot2: http://cran.r-project.org/web/packages/ggplot2/index.html .. _dplyr: http://cran.r-project.org/web/packages/dplyr/index.html .. _bigrquery: http://cran.r-project.org/web/packages/bigrquery/index.html .. _GoogleGenomics: http://bioconductor.org/packages/release/bioc/html/GoogleGenomics.html .. ### Python installation and package links .. 
_Python user scheme: https://docs.python.org/2/install/index.html#alternate-installation-the-user-scheme .. _virtualenv: http://docs.python-guide.org/en/latest/dev/virtualenvs/ .. ### Genomics API Getting started examples in github .. _Python getting started: https://github.com/googlegenomics/getting-started-with-the-api/tree/master/python .. _Java getting started: https://github.com/googlegenomics/getting-started-with-the-api/tree/master/java .. _Go getting started: https://github.com/googlegenomics/getting-started-with-the-api/tree/master/go .. GLOBAL SUBSTITUTIONS CAN GO HERE .. |sparkADC| replace:: If the `Application Default Credentials`_ are not sufficient, use ``--client-secrets=PATH/TO/YOUR/client_secrets.json``. If you do not already have this file, see the `authentication instructions`_ to obtain it. .. |dataflowADC| replace:: If the `Application Default Credentials`_ are not sufficient, use ``--client-secrets PATH/TO/YOUR/client_secrets.json``. If you do not already have this file, see the `authentication instructions`_ to obtain it. .. |dataflowSomeRefs| replace:: Use a comma-separated list to run over multiple disjoint regions. For example to run over `BRCA1`_ and `BRCA2`_ ``--references=chr13:32889610:32973808,chr17:41196311:41277499``. .. |dataflowAllRefs| replace:: To run this pipeline over the entire genome, use ``--allReferences`` instead of ``--references=chr17:41196311:41277499``. """ # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' #------------[ For Local Development ] ------------------------------------- # See https://github.com/snide/sphinx_rtd_theme for theme install instructions. 
import os on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'GoogleGenomicsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'GoogleGenomics.tex', u'Google Genomics Documentation', u'Google Genomics <https://cloud.google.com/genomics>', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. 
#latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'googlegenomics', u'Google Genomics Documentation', [u'Google Genomics <https://cloud.google.com/genomics>'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'GoogleGenomics', u'Google Genomics Documentation', u'Google Genomics <https://cloud.google.com/genomics>', 'GoogleGenomics', 'Google Genomics Cookbook', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
deflaux/start-here
docs/source/conf.py
Python
apache-2.0
18,947
[ "BLAST", "Bioconductor" ]
d74d69ea9016df3588951c7d7413ece1f079ba96ed363b53137910119295d6b3
""" The B{0install download} command-line interface. """ # Copyright (C) 2011, Thomas Leonard # See the README file for details, or visit http://0install.net. import sys from zeroinstall import _ from zeroinstall.cmd import UsageError, select from zeroinstall.injector import model from zeroinstall.support import tasks syntax = "URI" def add_options(parser): select.add_options(parser) parser.add_option("", "--show", help=_("show where components are installed"), action='store_true') def handle(config, options, args): """@type config: L{zeroinstall.injector.config.Config} @type args: [str]""" if len(args) != 1: raise UsageError() app = config.app_mgr.lookup_app(args[0], missing_ok = True) if app is not None: sels = app.get_selections() r = app.get_requirements() do_select = r.parse_update_options(options) iface_uri = sels.interface else: iface_uri = model.canonical_iface_uri(args[0]) do_select = True if do_select or options.gui: sels = select.get_selections(config, options, iface_uri, select_only = False, download_only = True, test_callback = None) if not sels: sys.exit(1) # Aborted by user else: dl = app.download_selections(sels) if dl: tasks.wait_for_blocker(dl) tasks.check(dl) if options.xml: select.show_xml(sels) if options.show: select.show_human(sels, config.stores) if app is not None and do_select: print(_("(use '0install update' to save the new parameters)")) complete = select.complete
slovenwd/0install
zeroinstall/cmd/download.py
Python
lgpl-2.1
1,483
[ "VisIt" ]
5579b4966c0fefa72498edd17512f740dc699e49c57342c46388671372a329ec
#!/usr/bin/env python2 # createLinearModel.py ############################################################################### # ------------------------- Description --------------------------------------- ############################################################################### # This script will be used to create a linear model of the relationship between # meteorlogy parameters and wildfire emissions. import cesm_nc_manager as cnm import os import numpy as np import sys from mpl_toolkits.basemap import Basemap, cm from netCDF4 import Dataset import matplotlib.pyplot as plt import matplotlib as mpl import numpy.ma as ma from datetime import date from datetime import timedelta import matplotlib.ticker as tkr import pandas as pd # The current goals of this script are to: # 1) Get North American Emissions and Air Quality data onto a single grid so # can be easily compared # 2) Make a scatterplot of T vs. emissions for a given month. Find a way to # visualize and report the summary of the observed relationship. 
startMonth = 1 endMonth = 12 ############################################################################### # Load Emissions grid for a selected variable # Emissions variables all have leading 'E' ############################################################################### EVar = "BC" EScenario = "RCP85" # NOTE: RCP85 all that exists for base period 2000s EYear = "2010" E, EUnits, ELongName, Et, Elat, Elon = cnm.getEmissionVariableData(EVar,\ EScenario,\ EYear) # Take care of propert area weighting of emissions and chance from per second # average for the day to total for day area = cnm.loadEmissionGridAttributes(Elat, Elon) # m^2 of each cell kgPerDay, kgTotal = cnm.makeTotalEmissions(E, area, Et) ############################################################################### # Load Emissions grid for a selected variable # Meteorology varibles all have leading 'M' ############################################################################### MVar = 'T' MScenario = '2000Base' ncFile = cnm.makeAQNCFile(MVar, MScenario) M, MUnits, MLongName, Mt, Mlat, Mlon = cnm.getGroundAirQaulityData(MVar,\ ncFile,\ MScenario) ############################################################################### # This work is focussed on PM from fire and dust in the western U.S. Apply a # western U.S. bounding condition to both Emissions and Air quality arrays. ############################################################################### minLon = 125.*-1 + 360. # Longitude of Cape Alava maxLon = 100.*-1 + 360. # Longitude of Eastern Border of Colorado and then some minLat = 31. # Arizona Mexico border maxLat = 49. # Oh Canada, the only place I might get a job. 
# Meteorology first M, Mt, Mlat, Mlon = cnm.subsetModelEmissions(M, Mt, Mlat, Mlon, startMonth, endMonth, minLat, maxLat, minLon, maxLon) # Now Emissions Parameter E, Et, Elat, Elon = cnm.subsetModelEmissions(E, Et, Elat, Elon, startMonth, endMonth, minLat, maxLat, minLon, maxLon) # Check the sizes of the returned data arrays. Make sure dimensions and dates # match before any analysis is considered. # Regridding information link: # http://stackoverflow.com/questions/25544110/regridding-regular-netcdf-data # Figure out a way to make the two grids comparable # Emissions are on a smaller grid than air quality parameters which are global. # Subset air quality grid by the four corners of the emissions grid minLon = Elon[0] maxLon = Elon[-1] maxLat = Elat[-1] minLat = Elat[0] MSubset, MtSubset, MlatSubset, MlonSubset = cnm.subsetModelEmissions(M, Mt, Mlat, Mlon, startMonth, endMonth, minLat, maxLat, minLon, maxLon) # Deal with the Jan 1 day in 2011 that Met data has for some reason if ( (MtSubset[-1] != Et[-1]) | (MtSubset[0] != Et[0]) ): # See where the first and last dates fall Mfirst_t = np.where(MtSubset == Et[0])[0][0] Mlast_t = np.where(MtSubset == Et[-1])[0][0] # Subset the time dimension and array directly MtSubset = MtSubset[Mfirst_t:(Mlast_t+1)] MSubset = MSubset[Mfirst_t:(Mlast_t+1),:,:] # Make sure times and shapes align before allowing code to proceed beyond this # point if (MSubset.shape == E.shape): M = MSubset ; del MSubset Mlat = MlatSubset ; del MlatSubset Mlon = MlonSubset ; del MlonSubset # Make sure all times are the same also if np.unique(Et- MtSubset)[0] == timedelta(0): Mt = MtSubset ; del MtSubset t = Mt else: raise ValueError('Emission t(ime) and Air Quality t(ime) do not match') else: raise ValueError('Emission grid and Air Quality grids do not match') # I want an array that labels the month of each time value nDays = len(t) # make arrray that is month of t mon = np.zeros(nDays,dtype=int) for i in range(nDays): mon[i] = t[i].month 
############################################################################### # Explore relationship between the two variables ############################################################################### # Make the Raw plot if (False): # TODO: Color each point by month plt.figure(figsize=(12,9)) # Remove the plot frame lines because they are useless ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) # Also remove tick marks from these sides ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() plt.scatter(M.ravel(), kgPerDay.ravel(), marker='.', color='k') plt.title("Raw Scatterplot of " + EVar + ' vs. ' + MVar, fontsize=30) plt.xlabel(MVar + ' [' + MUnits + ']', fontsize=20) plt.ylabel(EVar + ' [kg/day]', fontsize=20) plt.savefig('testRaw.png') ############################################################################### # Now plot daily totals for the chosedomain ############################################################################### # to consider # http://stackoverflow.com/questions/17450313/summing-over-months-with-pandas dailyDomainMMean = np.mean(M, axis=(1,2)) dailyDomainESum = np.sum(kgPerDay, axis=(1,2)) plt.figure(figsize=(12,9)) # Remove the plot frame lines because they are useless ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) #ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0)) #ax.yaxis.get_major_formatter().set_powerlimits((0, 1)) ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter(useMathText=True, useOffset=False)) # Also remove tick marks from these sides ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() plt.scatter(KtoF(dailyDomainMMean), dailyDomainESum, marker='.', c=mon, s=40, lw=0) cbar = plt.colorbar(ticks = [1,2,3,4,4,5,6,7,8,9,10,11,12]) cbar.set_label('Month', rotation=270, fontsize=20) plt.title("Scatterplot of daily summed " + EVar + ' vs. 
monthly mean ' +\ MVar, fontsize=24, y=1.04) plt.xlabel(MVar + ' [' + MUnits + ']', fontsize=20) plt.ylabel(EVar + ' [kg/day]', fontsize=20) plt.ylim([0, dailyDomainESum.max()]) plt.savefig('testDaily.png') ############################################################################### # Also explore this mon shaded time series to explore why there are some days # in unexspected months that have really high emissions. Could it be Mexico? # Subset the domain being plotted and see if the signal remains. ############################################################################### plt.scatter(t, dailyDomainESum, c=mon) ############################################################################### # Now plot monthy totals for the chosedomain, first make a handy pandas dataframe ############################################################################### #d = {'time': t, 'dailyDomainMMean': dailyDomainMMean}
stevenjoelbrey/PMFutures
Python/createLinearModel.py
Python
mit
8,965
[ "NetCDF" ]
7cfe5f2f93c0baa791e96078fcfb35883faf94d1ea9fb9648dc20f4247c44ed7
# Changes: # # can now specify 'zipfile = None', in this case the Python module # library archive is appended to the exe. # Todo: # # Make 'unbuffered' a per-target option from distutils.core import Command from distutils.spawn import spawn from distutils.errors import * import sys, os, imp, types, stat import marshal import zipfile import sets import tempfile import struct is_win64 = struct.calcsize("P") == 8 def _is_debug_build(): for ext, _, _ in imp.get_suffixes(): if ext == "_d.pyd": return True return False is_debug_build = _is_debug_build() if is_debug_build: python_dll = "python%d%d_d.dll" % sys.version_info[:2] else: python_dll = "python%d%d.dll" % sys.version_info[:2] # resource constants RT_BITMAP=2 # note: we cannot use the list from imp.get_suffixes() because we want # .pyc and .pyo, independent of the optimize flag. _py_suffixes = ['.py', '.pyo', '.pyc', '.pyw'] _c_suffixes = [_triple[0] for _triple in imp.get_suffixes() if _triple[2] == imp.C_EXTENSION] def imp_find_module(name): # same as imp.find_module, but handles dotted names names = name.split('.') path = None for name in names: result = imp.find_module(name, path) path = [result[1]] return result def fancy_split(str, sep=","): # a split which also strips whitespace from the items # passing a list or tuple will return it unchanged if str is None: return [] if hasattr(str, "split"): return [item.strip() for item in str.split(sep)] return str def ensure_unicode(text): if isinstance(text, unicode): return text return text.decode("mbcs") # This loader locates extension modules relative to the library.zip # file when an archive is used (i.e., skip_archive is not used), otherwise # it locates extension modules relative to sys.prefix. 
LOADER = """ def __load(): import imp, os, sys try: dirname = os.path.dirname(__loader__.archive) except NameError: dirname = sys.prefix path = os.path.join(dirname, '%s') #print "py2exe extension module", __name__, "->", path mod = imp.load_dynamic(__name__, path) ## mod.frozen = 1 __load() del __load """ # A very loosely defined "target". We assume either a "script" or "modules" # attribute. Some attributes will be target specific. class Target: def __init__(self, **kw): self.__dict__.update(kw) # If modules is a simple string, assume they meant list m = self.__dict__.get("modules") if m and type(m) in types.StringTypes: self.modules = [m] def get_dest_base(self): dest_base = getattr(self, "dest_base", None) if dest_base: return dest_base script = getattr(self, "script", None) if script: return os.path.basename(os.path.splitext(script)[0]) modules = getattr(self, "modules", None) assert modules, "no script, modules or dest_base specified" return modules[0].split(".")[-1] def validate(self): resources = getattr(self, "bitmap_resources", []) + \ getattr(self, "icon_resources", []) for r_id, r_filename in resources: if type(r_id) != type(0): raise DistutilsOptionError, "Resource ID must be an integer" if not os.path.isfile(r_filename): raise DistutilsOptionError, "Resource filename '%s' does not exist" % r_filename def FixupTargets(targets, default_attribute): if not targets: return targets ret = [] for target_def in targets: if type(target_def) in types.StringTypes : # Create a default target object, with the string as the attribute target = Target(**{default_attribute: target_def}) else: d = getattr(target_def, "__dict__", target_def) if not d.has_key(default_attribute): raise DistutilsOptionError, \ "This target class requires an attribute '%s'" % default_attribute target = Target(**d) target.validate() ret.append(target) return ret class py2exe(Command): description = "" # List of option tuples: long name, short name (None if no short # name), and help string. 
user_options = [ ('optimize=', 'O', "optimization level: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('dist-dir=', 'd', "directory to put final built distributions in (default is dist)"), ("excludes=", 'e', "comma-separated list of modules to exclude"), ("dll-excludes=", None, "comma-separated list of DLLs to exclude"), ("ignores=", None, "comma-separated list of modules to ignore if they are not found"), ("includes=", 'i', "comma-separated list of modules to include"), ("packages=", 'p', "comma-separated list of packages to include"), ("compressed", 'c', "create a compressed zipfile"), ("xref", 'x', "create and show a module cross reference"), ("bundle-files=", 'b', "bundle dlls in the zipfile or the exe. Valid levels are 1, 2, or 3 (default)"), ("skip-archive", None, "do not place Python bytecode files in an archive, put them directly in the file system"), ("ascii", 'a', "do not automatically include encodings and codecs"), ('custom-boot-script=', None, "Python file that will be run when setting up the runtime environment"), ] boolean_options = ["compressed", "xref", "ascii", "skip-archive"] def initialize_options (self): self.xref =0 self.compressed = 0 self.unbuffered = 0 self.optimize = 0 self.includes = None self.excludes = None self.ignores = None self.packages = None self.dist_dir = None self.dll_excludes = None self.typelibs = None self.bundle_files = 3 self.skip_archive = 0 self.ascii = 0 self.custom_boot_script = None def finalize_options (self): self.optimize = int(self.optimize) self.excludes = fancy_split(self.excludes) self.includes = fancy_split(self.includes) self.ignores = fancy_split(self.ignores) self.bundle_files = int(self.bundle_files) if self.bundle_files < 1 or self.bundle_files > 3: raise DistutilsOptionError, \ "bundle-files must be 1, 2, or 3, not %s" % self.bundle_files if is_win64 and self.bundle_files < 3: raise DistutilsOptionError, \ "bundle-files %d not yet supported on win64" % 
self.bundle_files if self.skip_archive: if self.compressed: raise DistutilsOptionError, \ "can't compress when skipping archive" if self.distribution.zipfile is None: raise DistutilsOptionError, \ "zipfile cannot be None when skipping archive" # includes is stronger than excludes for m in self.includes: if m in self.excludes: self.excludes.remove(m) self.packages = fancy_split(self.packages) self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) self.dll_excludes = [x.lower() for x in fancy_split(self.dll_excludes)] def run(self): build = self.reinitialize_command('build') build.run() sys_old_path = sys.path[:] if build.build_platlib is not None: sys.path.insert(0, build.build_platlib) if build.build_lib is not None: sys.path.insert(0, build.build_lib) try: self._run() finally: sys.path = sys_old_path def _run(self): self.create_directories() self.plat_prepare() self.fixup_distribution() dist = self.distribution # all of these contain module names required_modules = [] for target in dist.com_server + dist.service + dist.ctypes_com_server: required_modules.extend(target.modules) # and these contains file names required_files = [target.script for target in dist.windows + dist.console] mf = self.create_modulefinder() # These are the name of a script, but used as a module! for f in dist.isapi: mf.load_file(f.script) if self.typelibs: print "*** generate typelib stubs ***" from distutils.dir_util import mkpath genpy_temp = os.path.join(self.temp_dir, "win32com", "gen_py") mkpath(genpy_temp) num_stubs = collect_win32com_genpy(genpy_temp, self.typelibs, verbose=self.verbose, dry_run=self.dry_run) print "collected %d stubs from %d type libraries" \ % (num_stubs, len(self.typelibs)) mf.load_package("win32com.gen_py", genpy_temp) self.packages.append("win32com.gen_py") # monkey patching the compile builtin. 
# The idea is to include the filename in the error message orig_compile = compile import __builtin__ def my_compile(source, filename, *args): try: result = orig_compile(source, filename, *args) except Exception, details: raise DistutilsError, "compiling '%s' failed\n %s: %s" % \ (filename, details.__class__.__name__, details) return result __builtin__.compile = my_compile print "*** searching for required modules ***" self.find_needed_modules(mf, required_files, required_modules) print "*** parsing results ***" py_files, extensions, builtins = self.parse_mf_results(mf) if self.xref: mf.create_xref() print "*** finding dlls needed ***" dlls = self.find_dlls(extensions) self.plat_finalize(mf.modules, py_files, extensions, dlls) dlls = [item for item in dlls if os.path.basename(item).lower() not in self.dll_excludes] # should we filter self.other_depends in the same way? print "*** create binaries ***" self.create_binaries(py_files, extensions, dlls) self.fix_badmodules(mf) if mf.any_missing(): print "The following modules appear to be missing" print mf.any_missing() if self.other_depends: print print "*** binary dependencies ***" print "Your executable(s) also depend on these dlls which are not included," print "you may or may not need to distribute them." print print "Make sure you have the license if you distribute any of them, and" print "make sure you don't distribute files belonging to the operating system." print for fnm in self.other_depends: print " ", os.path.basename(fnm), "-", fnm.strip() def create_modulefinder(self): from modulefinder import ReplacePackage from py2exe.mf import ModuleFinder ReplacePackage("_xmlplus", "xml") return ModuleFinder(excludes=self.excludes) def fix_badmodules(self, mf): # This dictionary maps additional builtin module names to the # module that creates them. # For example, 'wxPython.misc' creates a builtin module named # 'miscc'. 
builtins = {"clip_dndc": "wxPython.clip_dnd", "cmndlgsc": "wxPython.cmndlgs", "controls2c": "wxPython.controls2", "controlsc": "wxPython.controls", "eventsc": "wxPython.events", "filesysc": "wxPython.filesys", "fontsc": "wxPython.fonts", "framesc": "wxPython.frames", "gdic": "wxPython.gdi", "imagec": "wxPython.image", "mdic": "wxPython.mdi", "misc2c": "wxPython.misc2", "miscc": "wxPython.misc", "printfwc": "wxPython.printfw", "sizersc": "wxPython.sizers", "stattoolc": "wxPython.stattool", "streamsc": "wxPython.streams", "utilsc": "wxPython.utils", "windows2c": "wxPython.windows2", "windows3c": "wxPython.windows3", "windowsc": "wxPython.windows", } # Somewhat hackish: change modulefinder's badmodules dictionary in place. bad = mf.badmodules # mf.badmodules is a dictionary mapping unfound module names # to another dictionary, the keys of this are the module names # importing the unknown module. For the 'miscc' module # mentioned above, it looks like this: # mf.badmodules["miscc"] = { "wxPython.miscc": 1 } for name in mf.any_missing(): if name in self.ignores: del bad[name] continue mod = builtins.get(name, None) if mod is not None: if mod in bad[name] and bad[name] == {mod: 1}: del bad[name] def find_dlls(self, extensions): dlls = [item.__file__ for item in extensions] ## extra_path = ["."] # XXX extra_path = [] dlls, unfriendly_dlls, other_depends = \ self.find_dependend_dlls(dlls, extra_path + sys.path, self.dll_excludes) self.other_depends = other_depends # dlls contains the path names of all dlls we need. # If a dll uses a function PyImport_ImportModule (or what was it?), # it's name is additionally in unfriendly_dlls. 
for item in extensions: if item.__file__ in dlls: dlls.remove(item.__file__) return dlls def create_directories(self): bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'winexe') collect_name = "collect-%d.%d" % sys.version_info[:2] self.collect_dir = os.path.abspath(os.path.join(self.bdist_dir, collect_name)) self.mkpath(self.collect_dir) bundle_name = "bundle-%d.%d" % sys.version_info[:2] self.bundle_dir = os.path.abspath(os.path.join(self.bdist_dir, bundle_name)) self.mkpath(self.bundle_dir) self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp")) self.mkpath(self.temp_dir) self.dist_dir = os.path.abspath(self.dist_dir) self.mkpath(self.dist_dir) if self.distribution.zipfile is None: self.lib_dir = self.dist_dir else: self.lib_dir = os.path.join(self.dist_dir, os.path.dirname(self.distribution.zipfile)) self.mkpath(self.lib_dir) def copy_extensions(self, extensions): print "*** copy extensions ***" # copy the extensions to the target directory for item in extensions: src = item.__file__ if self.bundle_files > 2: # don't bundle pyds and dlls dst = os.path.join(self.lib_dir, (item.__pydfile__)) self.copy_file(src, dst, preserve_mode=0) self.lib_files.append(dst) else: # we have to preserve the packages package = "\\".join(item.__name__.split(".")[:-1]) if package: dst = os.path.join(package, os.path.basename(src)) else: dst = os.path.basename(src) self.copy_file(src, os.path.join(self.collect_dir, dst), preserve_mode=0) self.compiled_files.append(dst) def copy_dlls(self, dlls): # copy needed dlls where they belong. print "*** copy dlls ***" if self.bundle_files < 3: self.copy_dlls_bundle_files(dlls) return # dlls belong into the lib_dir, except those listed in dlls_in_exedir, # which have to go into exe_dir (pythonxy.dll, w9xpopen.exe). 
for dll in dlls: base = os.path.basename(dll) if base.lower() in self.dlls_in_exedir: # These special dlls cannot be in the lib directory, # they must go into the exe directory. dst = os.path.join(self.exe_dir, base) else: dst = os.path.join(self.lib_dir, base) _, copied = self.copy_file(dll, dst, preserve_mode=0) if not self.dry_run and copied and base.lower() == python_dll.lower(): # If we actually copied pythonxy.dll, we have to patch it. # # Previously, the code did it every time, but this # breaks if, for example, someone runs UPX over the # dist directory. Patching an UPX'd dll seems to work # (no error is detected when patching), but the # resulting dll does not work anymore. # # The function restores the file times so # dependencies still work correctly. self.patch_python_dll_winver(dst) self.lib_files.append(dst) def copy_dlls_bundle_files(self, dlls): # If dlls have to be bundled, they are copied into the # collect_dir and will be added to the list of files to # include in the zip archive 'self.compiled_files'. # # dlls listed in dlls_in_exedir have to be treated differently: # for dll in dlls: base = os.path.basename(dll) if base.lower() in self.dlls_in_exedir: # pythonXY.dll must be bundled as resource. # w9xpopen.exe must be copied to self.exe_dir. if base.lower() == python_dll.lower() and self.bundle_files < 2: dst = os.path.join(self.bundle_dir, base) else: dst = os.path.join(self.exe_dir, base) _, copied = self.copy_file(dll, dst, preserve_mode=0) if not self.dry_run and copied and base.lower() == python_dll.lower(): # If we actually copied pythonxy.dll, we have to # patch it. Well, since it's impossible to load # resources from the bundled dlls it probably # doesn't matter. self.patch_python_dll_winver(dst) self.lib_files.append(dst) continue dst = os.path.join(self.collect_dir, os.path.basename(dll)) self.copy_file(dll, dst, preserve_mode=0) # Make sure they will be included into the zipfile. 
self.compiled_files.append(os.path.basename(dst)) def create_binaries(self, py_files, extensions, dlls): dist = self.distribution # byte compile the python modules into the target directory print "*** byte compile python files ***" self.compiled_files = byte_compile(py_files, target_dir=self.collect_dir, optimize=self.optimize, force=0, verbose=self.verbose, dry_run=self.dry_run) self.lib_files = [] self.console_exe_files = [] self.windows_exe_files = [] self.service_exe_files = [] self.comserver_files = [] self.copy_extensions(extensions) self.copy_dlls(dlls) # create the shared zipfile containing all Python modules if dist.zipfile is None: fd, archive_name = tempfile.mkstemp() os.close(fd) else: archive_name = os.path.join(self.lib_dir, os.path.basename(dist.zipfile)) arcname = self.make_lib_archive(archive_name, base_dir=self.collect_dir, files=self.compiled_files, verbose=self.verbose, dry_run=self.dry_run) if dist.zipfile is not None: self.lib_files.append(arcname) for target in self.distribution.isapi: print "*** copy isapi support DLL ***" # Locate the support DLL, and copy as "_script.dll", just like # isapi itself import isapi src_name = is_debug_build and "PyISAPI_loader_d.dll" or \ "PyISAPI_loader.dll" src = os.path.join(isapi.__path__[0], src_name) # destination name is "_{module_name}.dll" just like pyisapi does. 
script_base = os.path.splitext(os.path.basename(target.script))[0] dst = os.path.join(self.exe_dir, "_" + script_base + ".dll") self.copy_file(src, dst, preserve_mode=0) if self.distribution.has_data_files(): print "*** copy data files ***" install_data = self.reinitialize_command('install_data') install_data.install_dir = self.dist_dir install_data.ensure_finalized() install_data.run() self.lib_files.extend(install_data.get_outputs()) # build the executables for target in dist.console: dst = self.build_executable(target, self.get_console_template(), arcname, target.script) self.console_exe_files.append(dst) for target in dist.windows: dst = self.build_executable(target, self.get_windows_template(), arcname, target.script) self.windows_exe_files.append(dst) for target in dist.service: dst = self.build_service(target, self.get_service_template(), arcname) self.service_exe_files.append(dst) for target in dist.isapi: dst = self.build_isapi(target, self.get_isapi_template(), arcname) for target in dist.com_server: if getattr(target, "create_exe", True): dst = self.build_comserver(target, self.get_comexe_template(), arcname) self.comserver_files.append(dst) if getattr(target, "create_dll", True): dst = self.build_comserver(target, self.get_comdll_template(), arcname) self.comserver_files.append(dst) for target in dist.ctypes_com_server: dst = self.build_comserver(target, self.get_ctypes_comdll_template(), arcname, boot_script="ctypes_com_server") self.comserver_files.append(dst) if dist.zipfile is None: os.unlink(arcname) else: if self.bundle_files < 3 or self.compressed: arcbytes = open(arcname, "rb").read() arcfile = open(arcname, "wb") if self.bundle_files < 2: # bundle pythonxy.dll also print "Adding %s to %s" % (python_dll, arcname) arcfile.write("<pythondll>") bytes = open(os.path.join(self.bundle_dir, python_dll), "rb").read() arcfile.write(struct.pack("i", len(bytes))) arcfile.write(bytes) # python dll if self.compressed: # prepend zlib.pyd also zlib_file = 
imp.find_module("zlib")[0] if zlib_file: print "Adding zlib%s.pyd to %s" % (is_debug_build and "_d" or "", arcname) arcfile.write("<zlib.pyd>") bytes = zlib_file.read() arcfile.write(struct.pack("i", len(bytes))) arcfile.write(bytes) # zlib.pyd arcfile.write(arcbytes) #### if self.bundle_files < 2: #### # remove python dll from the exe_dir, since it is now bundled. #### os.remove(os.path.join(self.exe_dir, python_dll)) # for user convenience, let subclasses override the templates to use def get_console_template(self): return is_debug_build and "run_d.exe" or "run.exe" def get_windows_template(self): return is_debug_build and "run_w_d.exe" or "run_w.exe" def get_service_template(self): return is_debug_build and "run_d.exe" or "run.exe" def get_isapi_template(self): return is_debug_build and "run_isapi_d.dll" or "run_isapi.dll" def get_comexe_template(self): return is_debug_build and "run_w_d.exe" or "run_w.exe" def get_comdll_template(self): return is_debug_build and "run_dll_d.dll" or "run_dll.dll" def get_ctypes_comdll_template(self): return is_debug_build and "run_ctypes_dll_d.dll" or "run_ctypes_dll.dll" def fixup_distribution(self): dist = self.distribution # Convert our args into target objects. 
dist.com_server = FixupTargets(dist.com_server, "modules") dist.ctypes_com_server = FixupTargets(dist.ctypes_com_server, "modules") dist.service = FixupTargets(dist.service, "modules") dist.windows = FixupTargets(dist.windows, "script") dist.console = FixupTargets(dist.console, "script") dist.isapi = FixupTargets(dist.isapi, "script") # make sure all targets use the same directory, this is # also the directory where the pythonXX.dll must reside paths = sets.Set() for target in dist.com_server + dist.service \ + dist.windows + dist.console + dist.isapi: paths.add(os.path.dirname(target.get_dest_base())) if len(paths) > 1: raise DistutilsOptionError, \ "all targets must use the same directory: %s" % \ [p for p in paths] if paths: exe_dir = paths.pop() # the only element if os.path.isabs(exe_dir): raise DistutilsOptionError, \ "exe directory must be relative: %s" % exe_dir self.exe_dir = os.path.join(self.dist_dir, exe_dir) self.mkpath(self.exe_dir) else: # Do we allow to specify no targets? # We can at least build a zipfile... self.exe_dir = self.lib_dir def get_boot_script(self, boot_type): # return the filename of the script to use for com servers. thisfile = sys.modules['py2exe.build_exe'].__file__ return os.path.join(os.path.dirname(thisfile), "boot_" + boot_type + ".py") def build_comserver(self, target, template, arcname, boot_script="com_servers"): # Build a dll and an exe executable hosting all the com # objects listed in module_names. # The basename of the dll/exe is the last part of the first module. # Do we need a way to specify the name of the files to be built? # Setup the variables our boot script needs. 
vars = {"com_module_names" : target.modules} boot = self.get_boot_script(boot_script) # and build it return self.build_executable(target, template, arcname, boot, vars) def get_service_names(self, module_name): # import the script with every side effect :) __import__(module_name) mod = sys.modules[module_name] for name, klass in mod.__dict__.items(): if hasattr(klass, "_svc_name_"): break else: raise RuntimeError, "No services in module" deps = () if hasattr(klass, "_svc_deps_"): deps = klass._svc_deps_ return klass.__name__, klass._svc_name_, klass._svc_display_name_, deps def build_service(self, target, template, arcname): # It should be possible to host many modules in a single service - # but this is yet to be tested. assert len(target.modules)==1, "We only support one service module" cmdline_style = getattr(target, "cmdline_style", "py2exe") if cmdline_style not in ["py2exe", "pywin32", "custom"]: raise RuntimeError, "cmdline_handler invalid" vars = {"service_module_names" : target.modules, "cmdline_style": cmdline_style, } boot = self.get_boot_script("service") return self.build_executable(target, template, arcname, boot, vars) def build_isapi(self, target, template, arcname): target_module = os.path.splitext(os.path.basename(target.script))[0] vars = {"isapi_module_name" : target_module, } return self.build_executable(target, template, arcname, None, vars) def build_executable(self, target, template, arcname, script, vars={}): # Build an executable for the target # template is the exe-stub to use, and arcname is the zipfile # containing the python modules. 
from py2exe_util import add_resource, add_icon ext = os.path.splitext(template)[1] exe_base = target.get_dest_base() exe_path = os.path.join(self.dist_dir, exe_base + ext) # The user may specify a sub-directory for the exe - that's fine, we # just specify the parent directory for the .zip parent_levels = len(os.path.normpath(exe_base).split(os.sep))-1 lib_leaf = self.lib_dir[len(self.dist_dir)+1:] relative_arcname = ((".." + os.sep) * parent_levels) if lib_leaf: relative_arcname += lib_leaf + os.sep relative_arcname += os.path.basename(arcname) src = os.path.join(os.path.dirname(__file__), template) # We want to force the creation of this file, as otherwise distutils # will see the earlier time of our 'template' file versus the later # time of our modified template file, and consider our old file OK. old_force = self.force self.force = True self.copy_file(src, exe_path, preserve_mode=0) self.force = old_force # Make sure the file is writeable... os.chmod(exe_path, stat.S_IREAD | stat.S_IWRITE) try: f = open(exe_path, "a+b") f.close() except IOError, why: print "WARNING: File %s could not be opened - %s" % (exe_path, why) # We create a list of code objects, and write it as a marshaled # stream. The framework code then just exec's these in order. # First is our common boot script. 
boot = self.get_boot_script("common") boot_code = compile(file(boot, "U").read(), os.path.abspath(boot), "exec") code_objects = [boot_code] if self.bundle_files < 3: code_objects.append( compile("import zipextimporter; zipextimporter.install()", "<install zipextimporter>", "exec")) for var_name, var_val in vars.items(): code_objects.append( compile("%s=%r\n" % (var_name, var_val), var_name, "exec") ) if self.custom_boot_script: code_object = compile(file(self.custom_boot_script, "U").read() + "\n", os.path.abspath(self.custom_boot_script), "exec") code_objects.append(code_object) if script: code_object = compile(open(script, "U").read() + "\n", os.path.basename(script), "exec") code_objects.append(code_object) code_bytes = marshal.dumps(code_objects) if self.distribution.zipfile is None: relative_arcname = "" si = struct.pack("iiii", 0x78563412, # a magic value, self.optimize, self.unbuffered, len(code_bytes), ) + relative_arcname + "\000" script_bytes = si + code_bytes + '\000\000' self.announce("add script resource, %d bytes" % len(script_bytes)) if not self.dry_run: add_resource(ensure_unicode(exe_path), script_bytes, u"PYTHONSCRIPT", 1, True) # add the pythondll as resource, and delete in self.exe_dir if self.bundle_files < 2 and self.distribution.zipfile is None: # bundle pythonxy.dll dll_path = os.path.join(self.bundle_dir, python_dll) bytes = open(dll_path, "rb").read() # image, bytes, lpName, lpType print "Adding %s as resource to %s" % (python_dll, exe_path) add_resource(ensure_unicode(exe_path), bytes, # for some reason, the 3. argument MUST BE UPPER CASE, # otherwise the resource will not be found. ensure_unicode(python_dll).upper(), 1, False) if self.compressed and self.bundle_files < 3 and self.distribution.zipfile is None: zlib_file = imp.find_module("zlib")[0] if zlib_file: print "Adding zlib.pyd as resource to %s" % exe_path zlib_bytes = zlib_file.read() add_resource(ensure_unicode(exe_path), zlib_bytes, # for some reason, the 3. 
argument MUST BE UPPER CASE, # otherwise the resource will not be found. u"ZLIB.PYD", 1, False) # Handle all resources specified by the target bitmap_resources = getattr(target, "bitmap_resources", []) for bmp_id, bmp_filename in bitmap_resources: bmp_data = open(bmp_filename, "rb").read() # skip the 14 byte bitmap header. if not self.dry_run: add_resource(ensure_unicode(exe_path), bmp_data[14:], RT_BITMAP, bmp_id, False) icon_resources = getattr(target, "icon_resources", []) for ico_id, ico_filename in icon_resources: if not self.dry_run: add_icon(ensure_unicode(exe_path), ensure_unicode(ico_filename), ico_id) for res_type, res_id, data in getattr(target, "other_resources", []): if not self.dry_run: if isinstance(res_type, basestring): res_type = ensure_unicode(res_type) add_resource(ensure_unicode(exe_path), data, res_type, res_id, False) typelib = getattr(target, "typelib", None) if typelib is not None: data = open(typelib, "rb").read() add_resource(ensure_unicode(exe_path), data, u"TYPELIB", 1, False) self.add_versioninfo(target, exe_path) # Hm, this doesn't make sense with normal executables, which are # already small (around 20 kB). # # But it would make sense with static build pythons, but not # if the zipfile is appended to the exe - it will be too slow # then (although it is a wonder it works at all in this case). # # Maybe it would be faster to use the frozen modules machanism # instead of the zip-import? ## if self.compressed: ## import gc ## gc.collect() # to close all open files! ## os.system("upx -9 %s" % exe_path) if self.distribution.zipfile is None: zip_data = open(arcname, "rb").read() open(exe_path, "a+b").write(zip_data) return exe_path def add_versioninfo(self, target, exe_path): # Try to build and add a versioninfo resource def get(name, md = self.distribution.metadata): # Try to get an attribute from the target, if not defined # there, from the distribution's metadata, or None. 
Note # that only *some* attributes are allowed by distutils on # the distribution's metadata: version, description, and # name. return getattr(target, name, getattr(md, name, None)) version = get("version") if version is None: return from py2exe.resources.VersionInfo import Version, RT_VERSION, VersionError version = Version(version, file_description = get("description"), comments = get("comments"), company_name = get("company_name"), legal_copyright = get("copyright"), legal_trademarks = get("trademarks"), original_filename = os.path.basename(exe_path), product_name = get("name"), product_version = get("product_version") or version) try: bytes = version.resource_bytes() except VersionError, detail: self.warn("Version Info will not be included:\n %s" % detail) return from py2exe_util import add_resource add_resource(ensure_unicode(exe_path), bytes, RT_VERSION, 1, False) def patch_python_dll_winver(self, dll_name, new_winver = None): from py2exe.resources.StringTables import StringTable, RT_STRING from py2exe_util import add_resource new_winver = new_winver or self.distribution.metadata.name or "py2exe" if self.verbose: print "setting sys.winver for '%s' to '%s'" % (dll_name, new_winver) if self.dry_run: return # We preserve the times on the file, so the dependency tracker works. st = os.stat(dll_name) # and as the resource functions silently fail if the open fails, # check it explicitly. os.chmod(dll_name, stat.S_IREAD | stat.S_IWRITE) try: f = open(dll_name, "a+b") f.close() except IOError, why: print "WARNING: File %s could not be opened - %s" % (dll_name, why) # OK - do it. s = StringTable() # 1000 is the resource ID Python loads for its winver. s.add_string(1000, new_winver) delete = True for id, data in s.binary(): add_resource(ensure_unicode(dll_name), data, RT_STRING, id, delete) delete = False # restore the time. 
os.utime(dll_name, (st[stat.ST_ATIME], st[stat.ST_MTIME])) def find_dependend_dlls(self, dlls, pypath, dll_excludes): import py2exe_util sysdir = py2exe_util.get_sysdir() windir = py2exe_util.get_windir() # This is the tail of the path windows uses when looking for dlls # XXX On Windows NT, the SYSTEM directory is also searched exedir = os.path.dirname(sys.executable) syspath = os.environ['PATH'] loadpath = ';'.join([exedir, sysdir, windir, syspath]) # Found by Duncan Booth: # It may be possible that bin_depends needs extension modules, # so the loadpath must be extended by our python path. loadpath = loadpath + ';' + ';'.join(pypath) templates = sets.Set() if self.distribution.console: templates.add(self.get_console_template()) if self.distribution.windows: templates.add(self.get_windows_template()) if self.distribution.service: templates.add(self.get_service_template()) for target in self.distribution.com_server: if getattr(target, "create_exe", True): templates.add(self.get_comexe_template()) if getattr(target, "create_dll", True): templates.add(self.get_comdll_template()) templates = [os.path.join(os.path.dirname(__file__), t) for t in templates] # We use Python.exe to track the dependencies of our run stubs ... images = dlls + templates self.announce("Resolving binary dependencies:") # we add python.exe (aka sys.executable) to the list of images # to scan for dependencies, but remove it later again from the # results list. In this way pythonXY.dll is collected, and # also the libraries it depends on. alldlls, warnings, other_depends = \ bin_depends(loadpath, images + [sys.executable], dll_excludes) alldlls.remove(sys.executable) for dll in alldlls: self.announce(" %s" % dll) # ... 
but we don't need the exe stubs run_xxx.exe for t in templates: alldlls.remove(t) return alldlls, warnings, other_depends # find_dependend_dlls() def get_hidden_imports(self): # imports done from builtin modules in C code (untrackable by py2exe) return {"time": ["_strptime"], ## "datetime": ["time"], "cPickle": ["copy_reg"], "parser": ["copy_reg"], "codecs": ["encodings"], "cStringIO": ["copy_reg"], "_sre": ["copy", "string", "sre"], } def parse_mf_results(self, mf): for name, imports in self.get_hidden_imports().items(): if name in mf.modules.keys(): for mod in imports: mf.import_hook(mod) tcl_src_dir = tcl_dst_dir = None if "Tkinter" in mf.modules.keys(): import Tkinter import _tkinter tk = _tkinter.create() tcl_dir = tk.call("info", "library") tcl_src_dir = os.path.split(tcl_dir)[0] tcl_dst_dir = os.path.join(self.lib_dir, "tcl") self.announce("Copying TCL files from %s..." % tcl_src_dir) self.copy_tree(os.path.join(tcl_src_dir, "tcl%s" % _tkinter.TCL_VERSION), os.path.join(tcl_dst_dir, "tcl%s" % _tkinter.TCL_VERSION)) self.copy_tree(os.path.join(tcl_src_dir, "tk%s" % _tkinter.TK_VERSION), os.path.join(tcl_dst_dir, "tk%s" % _tkinter.TK_VERSION)) del tk, _tkinter, Tkinter # Retrieve modules from modulefinder py_files = [] extensions = [] builtins = [] for item in mf.modules.values(): # There may be __main__ modules (from mf.run_script), but # we don't need them in the zipfile we build. if item.__name__ == "__main__": continue if self.bundle_files < 3 and item.__name__ in ("pythoncom", "pywintypes"): # these are handled specially in zipextimporter. 
continue src = item.__file__ if src: base, ext = os.path.splitext(src) suffix = ext if sys.platform.startswith("win") and ext in [".dll", ".pyd"] \ and base.endswith("_d"): suffix = "_d" + ext if suffix in _py_suffixes: py_files.append(item) elif suffix in _c_suffixes: extensions.append(item) if not self.bundle_files < 3: loader = self.create_loader(item) if loader: py_files.append(loader) else: raise RuntimeError \ ("Don't know how to handle '%s'" % repr(src)) else: builtins.append(item.__name__) # sort on the file names, the output is nicer to read py_files.sort(lambda a, b: cmp(a.__file__, b.__file__)) extensions.sort(lambda a, b: cmp(a.__file__, b.__file__)) builtins.sort() return py_files, extensions, builtins def plat_finalize(self, modules, py_files, extensions, dlls): # platform specific code for final adjustments to the file # lists if sys.platform == "win32": # pythoncom and pywintypes are imported via LoadLibrary calls, # help py2exe to include the dlls: if "pythoncom" in modules.keys(): import pythoncom dlls.add(pythoncom.__file__) if "pywintypes" in modules.keys(): import pywintypes dlls.add(pywintypes.__file__) self.copy_w9xpopen(modules, dlls) else: raise DistutilsError, "Platform %s not yet implemented" % sys.platform def copy_w9xpopen(self, modules, dlls): # Using popen requires (on Win9X) the w9xpopen.exe helper executable. if "os" in modules.keys() or "popen2" in modules.keys(): if is_debug_build: fname = os.path.join(os.path.dirname(sys.executable), "w9xpopen_d.exe") else: fname = os.path.join(os.path.dirname(sys.executable), "w9xpopen.exe") # Don't copy w9xpopen.exe if it doesn't exist (64-bit # Python build, for example) if os.path.exists(fname): dlls.add(fname) def create_loader(self, item): # Hm, how to avoid needless recreation of this file? 
pathname = os.path.join(self.temp_dir, "%s.py" % item.__name__) if self.bundle_files > 2: # don't bundle pyds and dlls # all dlls are copied into the same directory, so modify # names to include the package name to avoid name # conflicts and tuck it away for future reference fname = item.__name__ + os.path.splitext(item.__file__)[1] item.__pydfile__ = fname else: fname = os.path.basename(item.__file__) # and what about dry_run? if self.verbose: print "creating python loader for extension '%s' (%s -> %s)" % (item.__name__,item.__file__,fname) source = LOADER % fname if not self.dry_run: open(pathname, "w").write(source) else: return None from modulefinder import Module return Module(item.__name__, pathname) def plat_prepare(self): self.includes.append("warnings") # needed by Python itself if not self.ascii: self.packages.append("encodings") self.includes.append("codecs") if self.bundle_files < 3: self.includes.append("zipextimporter") self.excludes.append("_memimporter") # builtin in run_*.exe and run_*.dll if self.compressed: self.includes.append("zlib") # os.path will never be found ;-) self.ignores.append('os.path') # update the self.ignores list to ignore platform specific # modules. 
if sys.platform == "win32": self.ignores += ['AL', 'Audio_mac', 'Carbon.File', 'Carbon.Folder', 'Carbon.Folders', 'EasyDialogs', 'MacOS', 'Mailman', 'SOCKS', 'SUNAUDIODEV', '_dummy_threading', '_emx_link', '_xmlplus', '_xmlrpclib', 'al', 'bundlebuilder', 'ce', 'cl', 'dbm', 'dos', 'fcntl', 'gestalt', 'grp', 'ic', 'java.lang', 'mac', 'macfs', 'macostools', 'mkcwproject', 'org.python.core', 'os.path', 'os2', 'poll', 'posix', 'pwd', 'readline', 'riscos', 'riscosenviron', 'riscospath', 'rourl2path', 'sgi', 'sgmlop', 'sunaudiodev', 'termios', 'vms_lib'] # special dlls which must be copied to the exe_dir, not the lib_dir self.dlls_in_exedir = [python_dll, "w9xpopen%s.exe" % (is_debug_build and "_d" or ""), "msvcr71%s.dll" % (is_debug_build and "d" or "")] else: raise DistutilsError, "Platform %s not yet implemented" % sys.platform def find_needed_modules(self, mf, files, modules): # feed Modulefinder with everything, and return it. for mod in modules: mf.import_hook(mod) for path in files: mf.run_script(path) mf.run_script(self.get_boot_script("common")) if self.distribution.com_server: mf.run_script(self.get_boot_script("com_servers")) if self.distribution.ctypes_com_server: mf.run_script(self.get_boot_script("ctypes_com_server")) if self.distribution.service: mf.run_script(self.get_boot_script("service")) if self.custom_boot_script: mf.run_script(self.custom_boot_script) for mod in self.includes: if mod[-2:] == '.*': mf.import_hook(mod[:-2], None, ['*']) else: mf.import_hook(mod) for f in self.packages: def visit(arg, dirname, names): if '__init__.py' in names: arg.append(dirname) # Try to find the package using ModuleFinders's method to # allow for modulefinder.AddPackagePath interactions mf.import_hook(f) # If modulefinder has seen a reference to the package, then # we prefer to believe that (imp_find_module doesn't seem to locate # sub-packages) if f in mf.modules: module = mf.modules[f] if module.__path__ is None: # it's a module, not a package, so paths contains 
just the # file entry paths = [module.__file__] else: # it is a package because __path__ is available. __path__ # is actually a list of paths that are searched to import # sub-modules and sub-packages paths = module.__path__ else: # Find path of package try: paths = [imp_find_module(f)[1]] except ImportError: self.warn("No package named %s" % f) continue packages = [] for path in paths: # walk the path to find subdirs containing __init__.py files os.path.walk(path, visit, packages) # scan the results (directory of __init__.py files) # first trim the path (of the head package), # then convert directory name in package name, # finally push into modulefinder. for p in packages: if p.startswith(path): package = f + '.' + p[len(path)+1:].replace('\\', '.') mf.import_hook(package, None, ["*"]) return mf def make_lib_archive(self, zip_filename, base_dir, files, verbose=0, dry_run=0): from distutils.dir_util import mkpath if not self.skip_archive: # Like distutils "make_archive", but we can specify the files # to include, and the compression to use - default is # ZIP_STORED to keep the runtime performance up. Also, we # don't append '.zip' to the filename. mkpath(os.path.dirname(zip_filename), dry_run=dry_run) if self.compressed: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=compression) for f in files: z.write(os.path.join(base_dir, f), f) z.close() return zip_filename else: # Don't really produce an archive, just copy the files. from distutils.file_util import copy_file destFolder = os.path.dirname(zip_filename) for f in files: d = os.path.dirname(f) if d: mkpath(os.path.join(destFolder, d), verbose=verbose, dry_run=dry_run) copy_file( os.path.join(base_dir, f), os.path.join(destFolder, f), preserve_mode=0, verbose=verbose, dry_run=dry_run ) return '.' 
################################################################ class FileSet: # A case insensitive but case preserving set of files def __init__(self, iterable=None): self._dict = {} if iterable is not None: for arg in iterable: self.add(arg) def __repr__(self): return "<FileSet %s at %x>" % (self._dict.values(), id(self)) def add(self, fname): self._dict[fname.upper()] = fname def remove(self, fname): del self._dict[fname.upper()] def __contains__(self, fname): return fname.upper() in self._dict.keys() def __getitem__(self, index): key = self._dict.keys()[index] return self._dict[key] def __len__(self): return len(self._dict) def copy(self): res = FileSet() res._dict.update(self._dict) return res # class FileSet() def bin_depends(path, images, excluded_dlls): import py2exe_util warnings = FileSet() images = FileSet(images) dependents = FileSet() others = FileSet() while images: for image in images.copy(): images.remove(image) if not image in dependents: dependents.add(image) abs_image = os.path.abspath(image) loadpath = os.path.dirname(abs_image) + ';' + path for result in py2exe_util.depends(image, loadpath).items(): dll, uses_import_module = result if os.path.basename(dll).lower() not in excluded_dlls: if isSystemDLL(dll): others.add(dll) continue if dll not in images and dll not in dependents: images.add(dll) if uses_import_module: warnings.add(dll) return dependents, warnings, others # DLLs to be excluded # XXX This list is NOT complete (it cannot be) # Note: ALL ENTRIES MUST BE IN LOWER CASE! 
EXCLUDED_DLLS = ( "advapi32.dll", "comctl32.dll", "comdlg32.dll", "crtdll.dll", "gdi32.dll", "glu32.dll", "opengl32.dll", "imm32.dll", "kernel32.dll", "mfc42.dll", "msvcirt.dll", "msvcrt.dll", "msvcrtd.dll", "ntdll.dll", "odbc32.dll", "ole32.dll", "oleaut32.dll", "rpcrt4.dll", "shell32.dll", "shlwapi.dll", "user32.dll", "version.dll", "winmm.dll", "winspool.drv", "ws2_32.dll", "ws2help.dll", "wsock32.dll", "netapi32.dll", "gdiplus.dll", ) # XXX Perhaps it would be better to assume dlls from the systemdir are system dlls, # and make some exceptions for known dlls, like msvcr71, pythonXY.dll, and so on? def isSystemDLL(pathname): if os.path.basename(pathname).lower() in ("msvcr71.dll", "msvcr71d.dll"): return 0 if os.path.basename(pathname).lower() in EXCLUDED_DLLS: return 1 # How can we determine whether a dll is a 'SYSTEM DLL'? # Is it sufficient to use the Image Load Address? import struct file = open(pathname, "rb") if file.read(2) != "MZ": raise Exception, "Seems not to be an exe-file" file.seek(0x3C) pe_ofs = struct.unpack("i", file.read(4))[0] file.seek(pe_ofs) if file.read(4) != "PE\000\000": raise Exception, ("Seems not to be an exe-file", pathname) file.read(20 + 28) # COFF File Header, offset of ImageBase in Optional Header imagebase = struct.unpack("I", file.read(4))[0] return not (imagebase < 0x70000000) def byte_compile(py_files, optimize=0, force=0, target_dir=None, verbose=1, dry_run=0, direct=None): if direct is None: direct = (__debug__ and optimize == 0) # "Indirect" byte-compilation: write a temporary script and then # run it with the appropriate flags. 
if not direct: from tempfile import mktemp from distutils.util import execute script_name = mktemp(".py") if verbose: print "writing byte-compilation script '%s'" % script_name if not dry_run: script = open(script_name, "w") script.write("""\ from py2exe.build_exe import byte_compile from modulefinder import Module files = [ """) for f in py_files: script.write("Module(%s, %s, %s),\n" % \ (`f.__name__`, `f.__file__`, `f.__path__`)) script.write("]\n") script.write(""" byte_compile(files, optimize=%s, force=%s, target_dir=%s, verbose=%s, dry_run=0, direct=1) """ % (`optimize`, `force`, `target_dir`, `verbose`)) script.close() cmd = [sys.executable, script_name] if optimize == 1: cmd.insert(1, "-O") elif optimize == 2: cmd.insert(1, "-OO") spawn(cmd, verbose=verbose, dry_run=dry_run) execute(os.remove, (script_name,), "removing %s" % script_name, verbose=verbose, dry_run=dry_run) else: from py_compile import compile from distutils.dir_util import mkpath from distutils.dep_util import newer from distutils.file_util import copy_file for file in py_files: # Terminology from the py_compile module: # cfile - byte-compiled file # dfile - purported source filename (same as 'file' by default) cfile = file.__name__.replace('.', '\\') if file.__path__: dfile = cfile + '\\__init__.py' + (__debug__ and 'c' or 'o') else: dfile = cfile + '.py' + (__debug__ and 'c' or 'o') if target_dir: cfile = os.path.join(target_dir, dfile) if force or newer(file.__file__, cfile): if verbose: print "byte-compiling %s to %s" % (file.__file__, dfile) if not dry_run: mkpath(os.path.dirname(cfile)) suffix = os.path.splitext(file.__file__)[1] if suffix in (".py", ".pyw"): compile(file.__file__, cfile, dfile) elif suffix in _py_suffixes: # Minor problem: This will happily copy a file # <mod>.pyo to <mod>.pyc or <mod>.pyc to # <mod>.pyo, but it does seem to work. 
copy_file(file.__file__, cfile, preserve_mode=0) else: raise RuntimeError \ ("Don't know how to handle %r" % file.__file__) else: if verbose: print "skipping byte-compilation of %s to %s" % \ (file.__file__, dfile) compiled_files = [] for file in py_files: cfile = file.__name__.replace('.', '\\') if file.__path__: dfile = cfile + '\\__init__.py' + (optimize and 'o' or 'c') else: dfile = cfile + '.py' + (optimize and 'o' or 'c') compiled_files.append(dfile) return compiled_files # byte_compile() # win32com makepy helper. def collect_win32com_genpy(path, typelibs, verbose=0, dry_run=0): import win32com from win32com.client import gencache, makepy from distutils.file_util import copy_file old_gen_path = win32com.__gen_path__ num = 0 try: win32com.__gen_path__ = path win32com.gen_py.__path__ = [path] gencache.__init__() for info in typelibs: guid, lcid, major, minor = info[:4] # They may provide an input filename in the tuple - in which case # they will have pre-generated it on a machine with the typelibs # installed, and just want us to include it. fname_in = None if len(info) > 4: fname_in = info[4] if fname_in is not None: base = gencache.GetGeneratedFileName(guid, lcid, major, minor) fname_out = os.path.join(path, base) + ".py" copy_file(fname_in, fname_out, verbose=verbose, dry_run=dry_run) num += 1 # That's all we gotta do! continue # It seems bForDemand=True generates code which is missing # at least sometimes an import of DispatchBaseClass. # Until this is resolved, set it to false. # What's the purpose of bForDemand=True? Thomas # bForDemand is supposed to only generate stubs when each # individual object is referenced. A side-effect of that is # that each object gets its own source file. The intent of # this code was to set bForDemand=True, meaning we get the # 'file per object' behaviour, but then explicitly walk all # children forcing them to be built - so the entire object model # is included, but not in a huge .pyc. 
# I'm not sure why its not working :) I'll debug later. # bForDemand=False isn't really important here - the overhead for # monolithic typelib stubs is in the compilation, not the loading # of an existing .pyc. Mark. ## makepy.GenerateFromTypeLibSpec(info, bForDemand = True) tlb_info = (guid, lcid, major, minor) makepy.GenerateFromTypeLibSpec(tlb_info, bForDemand = False) # Now get the module, and build all sub-modules. mod = gencache.GetModuleForTypelib(*tlb_info) for clsid, name in mod.CLSIDToPackageMap.items(): try: gencache.GetModuleForCLSID(clsid) num += 1 #print "", name except ImportError: pass return num finally: # restore win32com, just in case. win32com.__gen_path__ = old_gen_path win32com.gen_py.__path__ = [old_gen_path] gencache.__init__() # utilities hacked from distutils.dir_util def _chmod(file): os.chmod(file, 0777) # Helper for force_remove_tree() def _build_cmdtuple(path, cmdtuples): for f in os.listdir(path): real_f = os.path.join(path,f) if os.path.isdir(real_f) and not os.path.islink(real_f): _build_cmdtuple(real_f, cmdtuples) else: cmdtuples.append((_chmod, real_f)) cmdtuples.append((os.remove, real_f)) cmdtuples.append((os.rmdir, path)) def force_remove_tree (directory, verbose=0, dry_run=0): """Recursively remove an entire directory tree. Any errors are ignored (apart from being reported to stdout if 'verbose' is true). """ import distutils from distutils.util import grok_environment_error _path_created = distutils.dir_util._path_created if verbose: print "removing '%s' (and everything under it)" % directory if dry_run: return cmdtuples = [] _build_cmdtuple(directory, cmdtuples) for cmd in cmdtuples: try: cmd[0](cmd[1]) # remove dir from cache if it's already there abspath = os.path.abspath(cmd[1]) if _path_created.has_key(abspath): del _path_created[abspath] except (IOError, OSError), exc: if verbose: print grok_environment_error( exc, "error removing %s: " % directory)
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/py2exe-0.6.8/py2exe/build_exe.py
Python
mit
65,585
[ "VisIt" ]
bbb981ffc8db6df0069a32cff7570a3e1702ef21264d2eb5478e0ca3ad2d9bb9
''' ======================================================================== count - Count reads per gene from BAM using UMIs and mapping coordinates ======================================================================== *Count the number of reads per gene based on the mapping co-ordinate and the UMI attached to the read* This tool is only designed to work with library preparation methods where the fragmentation occurs after amplification, as per most single cell RNA-Seq methods (e.g 10x, inDrop, Drop-seq, SCRB-seq and CEL-seq2). Since the precise mapping co-ordinate is not longer informative for such library preparations, it is simplified to the gene. This is a reasonable approach providing the number of available UMIs is sufficiently high and the sequencing depth is sufficiently low that the probability of two reads from the same gene having the same UMIs is acceptably low. If you want to count reads per gene for library preparations which fragment prior to amplification (e.g bulk RNA-Seq), please use ``umi_tools dedup`` to remove the duplicate reads as this will use the full information from the mapping co-ordinate. Then use a read counting tool such as FeatureCounts or HTSeq to count the reads per gene. In the rare case of bulk RNA-Seq using a library preparation method with fragmentation after amplification, one can still use ``count`` but note that it has not been tested on bulk RNA-Seq. This tool deviates from group and dedup in that the ``--per-gene`` option is hardcoded on. 
''' import sys import collections import re import os # required to make iteritems python2 and python3 compatible from builtins import dict import pysam import numpy as np import umi_tools.Utilities as U import umi_tools.Documentation as Documentation import umi_tools.network as network import umi_tools.umi_methods as umi_methods import umi_tools.sam_methods as sam_methods # add the generic docstring text __doc__ = __doc__ + Documentation.GENERIC_DOCSTRING_GDC usage = ''' count - Count reads per-gene using UMI and mapping coordinates Usage: umi_tools count [OPTIONS] --stdin=IN_BAM [--stdout=OUT_BAM] note: If --stdout is ommited, standard out is output. To generate a valid BAM file on standard out, please redirect log with --log=LOGFILE or --log2stderr ''' def main(argv=None): """script main. parses command line options in sys.argv, unless *argv* is given. """ if argv is None: argv = sys.argv # setup command line parser parser = U.OptionParser(version="%prog version: $Id$", usage=usage, description=globals()["__doc__"]) group = U.OptionGroup(parser, "count-specific options") parser.add_option("--wide-format-cell-counts", dest="wide_format_cell_counts", action="store_true", default=False, help=("output the cell counts in a wide format " "(rows=genes, columns=cells)")) parser.add_option_group(group) # add common options (-h/--help, ...) 
and parse command line (options, args) = U.Start(parser, argv=argv, add_group_dedup_options=False) options.per_gene = True # hardcodes counting to per-gene only U.validateSamOptions(options, group=False) if options.random_seed: np.random.seed(options.random_seed) if options.stdin != sys.stdin: in_name = options.stdin.name options.stdin.close() else: raise ValueError("Input on standard in not currently supported") if options.in_sam: in_mode = "r" else: in_mode = "rb" infile = pysam.Samfile(in_name, in_mode) # write out to tempfile and then sort to stdout tmpfilename = U.getTempFilename(dir=options.tmpdir) tmpfile = U.openFile(tmpfilename, mode="w") nInput, nOutput, input_reads = 0, 0, 0 gene_tag = options.gene_tag metacontig2contig = None if options.chrom: inreads = infile.fetch(reference=options.chrom) else: if options.gene_transcript_map: metacontig2contig = sam_methods.getMetaContig2contig( infile, options.gene_transcript_map) metatag = "MC" inreads = sam_methods.metafetcher(infile, metacontig2contig, metatag) gene_tag = metatag else: inreads = infile.fetch() bundle_iterator = sam_methods.get_bundles( options, only_count_reads=True, metacontig_contig=metacontig2contig) # set up UMIClusterer functor with methods specific to # specified options.method processor = network.UMIClusterer(options.method) for bundle, key, status in bundle_iterator(inreads): if status == "single_read": continue gene, cell = key umis = bundle.keys() counts = {umi: bundle[umi]["count"] for umi in umis} nInput += sum(counts.values()) while nInput >= input_reads + 1000000: input_reads += 1000000 U.info("Parsed %i input reads" % input_reads) # group the umis groups = processor( counts, threshold=options.threshold) gene_count = len(groups) if options.per_cell: tmpfile.write("%s\n" % "\t".join((gene, cell.decode(), str(gene_count)))) else: tmpfile.write("%s\n" % "\t".join((gene, str(gene_count)))) nOutput += gene_count tmpfile.close() if options.per_cell: gene_counts_dict = {} with 
U.openFile(tmpfilename, mode="r") as inf: genes = set() cells = set() for line in inf: gene, cell, gene_count = line.strip().split("\t") genes.add(gene) cells.add(cell) if gene not in gene_counts_dict: gene_counts_dict[gene] = {} gene_counts_dict[gene][cell] = gene_count if options.wide_format_cell_counts: # write out in wide format options.stdout.write( "%s\t%s\n" % ("gene", "\t".join(sorted(cells)))) for gene in sorted(genes): counts = [] for cell in sorted(cells): if cell in gene_counts_dict[gene]: counts.append(gene_counts_dict[gene][cell]) else: counts.append(0) options.stdout.write( "%s\t%s\n" % (gene, "\t".join(map(str, counts)))) else: # write out in long format options.stdout.write("%s\t%s\t%s\n" % ("gene", "cell", "count")) for gene in sorted(genes): for cell in sorted(list(gene_counts_dict[gene].keys())): options.stdout.write("%s\t%s\t%s\n" % ( gene, cell, gene_counts_dict[gene][cell])) else: options.stdout.write("%s\t%s\n" % ("gene", "count")) with U.openFile(tmpfilename, mode="r") as inf: for line in inf: options.stdout.write(line) os.unlink(tmpfilename) # output reads events and benchmark information. for event in bundle_iterator.read_events.most_common(): U.info("%s: %s" % (event[0], event[1])) U.info("Number of (post deduplication) reads counted: %i" % nOutput) U.Stop() if __name__ == "__main__": sys.exit(main(sys.argv))
CGATOxford/UMI-tools
umi_tools/count.py
Python
mit
7,483
[ "HTSeq", "pysam" ]
5a0b5a1b51b84ccdc4761cb9a86c28d780e6634a1e0de6fd736a74a949e81ed2
from sympy.core.basic import Basic, S, C, sympify from sympy.core.function import Function from sympy.ntheory import sieve from math import sqrt from sympy.utilities.decorator import deprecated ############################################################################### ######################## FACTORIAL and MULTI-FACTORIAL ######################## ############################################################################### class Factorial(Function): """Implementation of factorial function over nonnegative integers. For the sake of convenience and simplicity of procedures using this function it is defined for negative integers and returns zero in this case. The factorial is very important in combinatorics where it gives the number of ways in which 'n' objects can be permuted. It also arises in calculus, probability, number theory etc. There is strict relation of factorial with gamma function. In fact n! = gamma(n+1) for nonnegarive integers. Rewrite of this kind is very useful in case of combinatorial simplification. Computation of the factorial is done using two algorithms. For small arguments naive product is evaluated. However for bigger input algorithm Prime-Swing is used. It is the fastest algorithm known and computes n! via prime factorization of special class of numbers, called here the 'Swing Numbers'. >>> from sympy import * >>> n = Symbol('n', integer=True) >>> factorial(-2) 0 >>> factorial(0) 1 >>> factorial(7) 5040 >>> factorial(n) n! >>> factorial(2*n) (2*n)! 
""" nargs = 1 _small_swing = [ 1,1,1,3,3,15,5,35,35,315,63,693,231,3003,429,6435,6435,109395, 12155,230945,46189,969969,88179,2028117,676039,16900975,1300075, 35102025,5014575,145422675,9694845,300540195,300540195 ] @classmethod def _swing(cls, n): if n < 33: return cls._small_swing[n] else: N, primes = int(sqrt(n)), [] for prime in sieve.primerange(3, N+1): p, q = 1, n while True: q //= prime if q > 0: if q & 1 == 1: p *= prime else: break if p > 1: primes.append(p) for prime in sieve.primerange(N+1, n//3 + 1): if (n // prime) & 1 == 1: primes.append(prime) L_product = R_product = 1 for prime in sieve.primerange(n//2 + 1, n+1): L_product *= prime for prime in primes: R_product *= prime return L_product*R_product @classmethod def _recursive(cls, n): if n < 2: return 1 else: return (cls._recursive(n//2)**2)*cls._swing(n) @classmethod @deprecated def canonize(cls, n): return cls.eval(n) @classmethod def eval(cls, n): n = sympify(n) if n.is_Number: if n is S.Zero: return S.One elif n.is_Integer: if n.is_negative: return S.Zero else: n, result = n.p, 1 if n < 20: for i in range(2, n+1): result *= i else: N, bits = n, 0 while N != 0: if N & 1 == 1: bits += 1 N = N >> 1 result = cls._recursive(n)*2**(n-bits) return C.Integer(result) if n.is_integer: if n.is_negative: return S.Zero else: return C.gamma(n+1) @classmethod # ? def _eval_rewrite_as_gamma(self, arg): return C.gamma(1 + arg) def _eval_is_integer(self): return self.args[0].is_integer class MultiFactorial(Function): pass factorial = Factorial ############################################################################### ######################## RISING and FALLING FACTORIALS ######################## ############################################################################### class RisingFactorial(Function): """Rising factorial (also called Pochhammer symbol) is a double valued function arising in concrete mathematics, hypergeometric functions and series expanansions. It is defined by rf(x, k) = x * (x+1) * ... 
* (x + k-1) where 'x' can be arbitrary expression and 'k' is an integer. For more information check "Concrete mathematics" by Graham, pp. 66 or visit http://mathworld.wolfram.com/RisingFactorial.html page. >>> from sympy import * >>> x = Symbol('x') >>> rf(x, 0) 1 >>> rf(1, 5) 120 >>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x) True """ nargs = 2 @classmethod @deprecated def canonize(cls, x, k): return cls.eval(x, k) @classmethod def eval(cls, x, k): x = sympify(x) k = sympify(k) if x is S.NaN: return S.NaN elif x is S.One: return factorial(k) elif k.is_Integer: if k is S.NaN: return S.NaN elif k is S.Zero: return S.One else: if k.is_positive: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: if k.is_odd: return S.NegativeInfinity else: return S.Infinity else: return reduce(lambda r, i: r*(x+i), xrange(0, int(k)), 1) else: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: return S.Infinity else: return 1/reduce(lambda r, i: r*(x-i), xrange(1, abs(int(k))+1), 1) def _eval_rewrite_as_gamma(self, x, k): return C.gamma(x + k) / C.gamma(x) class FallingFactorial(Function): """Falling factorial (related to rising factorial) is a double valued function arising in concrete mathematics, hypergeometric functions and series expanansions. It is defined by ff(x, k) = x * (x-1) * ... * (x - k+1) where 'x' can be arbitrary expression and 'k' is an integer. For more information check "Concrete mathematics" by Graham, pp. 66 or visit http://mathworld.wolfram.com/FallingFactorial.html page. 
>>> from sympy import * >>> x = Symbol('x') >>> ff(x, 0) 1 >>> ff(5, 5) 120 >>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4) True """ nargs = 2 @classmethod @deprecated def canonize(cls, x, k): return cls.eval(x, k) @classmethod def eval(cls, x, k): x = sympify(x) k = sympify(k) if x is S.NaN: return S.NaN elif k.is_Integer: if k is S.NaN: return S.NaN elif k is S.Zero: return S.One else: result = S.One if k.is_positive: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: if k.is_odd: return S.NegativeInfinity else: return S.Infinity else: return reduce(lambda r, i: r*(x-i), xrange(0, int(k)), 1) else: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: return S.Infinity else: return 1/reduce(lambda r, i: r*(x+i), xrange(1, abs(int(k))+1), 1) def _eval_rewrite_as_gamma(self, x, k): return (-1)**k * C.gamma(-x + k) / C.gamma(-x) rf = RisingFactorial ff = FallingFactorial ############################################################################### ########################### BINOMIAL COEFFICIENTS ############################# ############################################################################### class Binomial(Function): """Implementation of the binomial coefficient. It can be defined in two ways depending on its desired interpretation: C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k! First formula has strict combinatorial meaning, definig the number of ways we can choose 'k' elements from 'n' element set. In this case both arguments are nonnegative integers and binomial is computed using efficient algorithm based on prime factorisation. The other definition is generalisation for arbitaty 'n', however 'k' must be also nonnegative. This case is very useful in case for evaluating summations. For the sake of convenience for negative 'k' this function will return zero no matter what valued is the other argument. 
>>> from sympy import * >>> n = symbols('n', integer=True) >>> binomial(15, 8) 6435 >>> binomial(n, -1) 0 >>> [ binomial(0, i) for i in range(1)] [1] >>> [ binomial(1, i) for i in range(2)] [1, 1] >>> [ binomial(2, i) for i in range(3)] [1, 2, 1] >>> [ binomial(3, i) for i in range(4)] [1, 3, 3, 1] >>> [ binomial(4, i) for i in range(5)] [1, 4, 6, 4, 1] >>> binomial(Rational(5,4), 3) -5/128 >>> binomial(n, 3) n*(1 - n)*(2 - n)/6 """ nargs = 2 @classmethod @deprecated def canonize(cls, r, k): return cls.eval(r, k) @classmethod def eval(cls, r, k): r, k = map(sympify, (r, k)) if k.is_Number: if k is S.Zero: return S.One elif k.is_Integer: if k.is_negative: return S.Zero else: if r.is_Integer and r.is_nonnegative: r, k = int(r), int(k) if k > r: return S.Zero elif k > r // 2: k = r - k M, result = int(sqrt(r)), 1 for prime in sieve.primerange(2, r+1): if prime > r - k: result *= prime elif prime > r // 2: continue elif prime > M: if r % prime < k % prime: result *= prime else: R, K = r, k exp = a = 0 while R > 0: a = int((R % prime) < (K % prime + a)) R, K = R // prime, K // prime exp = a + exp if exp > 0: result *= prime**exp return C.Integer(result) else: result = r - k + 1 for i in xrange(2, k+1): result *= r-k+i result /= i return result if k.is_integer: if k.is_negative: return S.Zero else: return C.gamma(r+1)/(C.gamma(r-k+1)*C.gamma(k+1)) def _eval_rewrite_as_gamma(self, r, k): return C.gamma(r+1) / (C.gamma(r-k+1)*C.gamma(k+1)) def _eval_is_integer(self): return self.args[0].is_integer and self.args[1].is_integer binomial = Binomial
hazelnusse/sympy-old
sympy/functions/combinatorial/factorials.py
Python
bsd-3-clause
12,446
[ "VisIt" ]
2755d8f6fdecaa898aa5c1441b565ede4a83ba391451dee3dde9fb6334e5ac5a
# copies.py - copy detection for Mercurial # # Copyright 2008 Matt Mackall <mpm@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. import util import heapq def _nonoverlap(d1, d2, d3): "Return list of elements in d1 not in d2 or d3" return sorted([d for d in d1 if d not in d3 and d not in d2]) def _dirname(f): s = f.rfind("/") if s == -1: return "" return f[:s] def _dirs(files): d = set() for f in files: f = _dirname(f) while f not in d: d.add(f) f = _dirname(f) return d def _findlimit(repo, a, b): """Find the earliest revision that's an ancestor of a or b but not both, None if no such revision exists. """ # basic idea: # - mark a and b with different sides # - if a parent's children are all on the same side, the parent is # on that side, otherwise it is on no side # - walk the graph in topological order with the help of a heap; # - add unseen parents to side map # - clear side of any parent that has children on different sides # - track number of interesting revs that might still be on a side # - track the lowest interesting rev seen # - quit when interesting revs is zero cl = repo.changelog working = len(cl) # pseudo rev for the working directory if a is None: a = working if b is None: b = working side = {a: -1, b: 1} visit = [-a, -b] heapq.heapify(visit) interesting = len(visit) hascommonancestor = False limit = working while interesting: r = -heapq.heappop(visit) if r == working: parents = [cl.rev(p) for p in repo.dirstate.parents()] else: parents = cl.parentrevs(r) for p in parents: if p < 0: continue if p not in side: # first time we see p; add it to visit side[p] = side[r] if side[p]: interesting += 1 heapq.heappush(visit, -p) elif side[p] and side[p] != side[r]: # p was interesting but now we know better side[p] = 0 interesting -= 1 hascommonancestor = True if side[r]: limit = r # lowest rev visited interesting -= 1 if not hascommonancestor: return None return 
limit def copies(repo, c1, c2, ca, checkdirs=False): """ Find moves and copies between context c1 and c2 """ # avoid silly behavior for update from empty dir if not c1 or not c2 or c1 == c2: return {}, {} # avoid silly behavior for parent -> working dir if c2.node() is None and c1.node() == repo.dirstate.parents()[0]: return repo.dirstate.copies(), {} limit = _findlimit(repo, c1.rev(), c2.rev()) if limit is None: # no common ancestor, no copies return {}, {} m1 = c1.manifest() m2 = c2.manifest() ma = ca.manifest() def makectx(f, n): if len(n) != 20: # in a working context? if c1.rev() is None: return c1.filectx(f) return c2.filectx(f) return repo.filectx(f, fileid=n) ctx = util.lrucachefunc(makectx) copy = {} fullcopy = {} diverge = {} def related(f1, f2, limit): # Walk back to common ancestor to see if the two files originate # from the same file. Since workingfilectx's rev() is None it messes # up the integer comparison logic, hence the pre-step check for # None (f1 and f2 can only be workingfilectx's initially). 
if f1 == f2: return f1 # a match g1, g2 = f1.ancestors(), f2.ancestors() try: f1r, f2r = f1.rev(), f2.rev() if f1r is None: f1 = g1.next() if f2r is None: f2 = g2.next() while 1: f1r, f2r = f1.rev(), f2.rev() if f1r > f2r: f1 = g1.next() elif f2r > f1r: f2 = g2.next() elif f1 == f2: return f1 # a match elif f1r == f2r or f1r < limit or f2r < limit: return False # copy no longer relevant except StopIteration: return False def checkcopies(f, m1, m2): '''check possible copies of f from m1 to m2''' of = None seen = set([f]) for oc in ctx(f, m1[f]).ancestors(): ocr = oc.rev() of = oc.path() if of in seen: # check limit late - grab last rename before if ocr < limit: break continue seen.add(of) fullcopy[f] = of # remember for dir rename detection if of not in m2: continue # no match, keep looking if m2[of] == ma.get(of): break # no merge needed, quit early c2 = ctx(of, m2[of]) cr = related(oc, c2, ca.rev()) if cr and (of == f or of == c2.path()): # non-divergent copy[f] = of of = None break if of in ma: diverge.setdefault(of, []).append(f) repo.ui.debug(" searching for copies back to rev %d\n" % limit) u1 = _nonoverlap(m1, m2, ma) u2 = _nonoverlap(m2, m1, ma) if u1: repo.ui.debug(" unmatched files in local:\n %s\n" % "\n ".join(u1)) if u2: repo.ui.debug(" unmatched files in other:\n %s\n" % "\n ".join(u2)) for f in u1: checkcopies(f, m1, m2) for f in u2: checkcopies(f, m2, m1) diverge2 = set() for of, fl in diverge.items(): if len(fl) == 1: del diverge[of] # not actually divergent else: diverge2.update(fl) # reverse map for below if fullcopy: repo.ui.debug(" all copies found (* = to merge, ! = divergent):\n") for f in fullcopy: note = "" if f in copy: note += "*" if f in diverge2: note += "!" 
repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note)) del diverge2 if not fullcopy or not checkdirs: return copy, diverge repo.ui.debug(" checking for directory renames\n") # generate a directory move map d1, d2 = _dirs(m1), _dirs(m2) invalid = set() dirmove = {} # examine each file copy for a potential directory move, which is # when all the files in a directory are moved to a new directory for dst, src in fullcopy.iteritems(): dsrc, ddst = _dirname(src), _dirname(dst) if dsrc in invalid: # already seen to be uninteresting continue elif dsrc in d1 and ddst in d1: # directory wasn't entirely moved locally invalid.add(dsrc) elif dsrc in d2 and ddst in d2: # directory wasn't entirely moved remotely invalid.add(dsrc) elif dsrc in dirmove and dirmove[dsrc] != ddst: # files from the same directory moved to two different places invalid.add(dsrc) else: # looks good so far dirmove[dsrc + "/"] = ddst + "/" for i in invalid: if i in dirmove: del dirmove[i] del d1, d2, invalid if not dirmove: return copy, diverge for d in dirmove: repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d])) # check unaccounted nonoverlapping files against directory moves for f in u1 + u2: if f not in fullcopy: for d in dirmove: if f.startswith(d): # new file added in a directory that was moved, move it df = dirmove[d] + f[len(d):] if df not in copy: copy[f] = df repo.ui.debug(" file %s -> %s\n" % (f, copy[f])) break return copy, diverge
joewalnes/idea-community
plugins/hg4idea/testData/bin/mercurial/copies.py
Python
apache-2.0
8,242
[ "VisIt" ]
04efba43bee6927732e632a95c90bb0b34a26658145f9265eb9b2ce45c0e37ee
""" Migration script to create the migrate_tools table. """ from sqlalchemy import * from sqlalchemy.orm import * from migrate import * from migrate.changeset import * import datetime now = datetime.datetime.utcnow # Need our custom types, but don't import anything else from model from galaxy.model.custom_types import * import sys, logging log = logging.getLogger( __name__ ) log.setLevel(logging.DEBUG) handler = logging.StreamHandler( sys.stdout ) format = "%(name)s %(levelname)s %(asctime)s %(message)s" formatter = logging.Formatter( format ) handler.setFormatter( formatter ) log.addHandler( handler ) metadata = MetaData() MigrateTools_table = Table( "migrate_tools", metadata, Column( "repository_id", TrimmedString( 255 ) ), Column( "repository_path", TEXT ), Column( "version", Integer ) ) def upgrade(migrate_engine): metadata.bind = migrate_engine print __doc__ metadata.reflect() # Create the table. try: MigrateTools_table.create() cmd = "INSERT INTO migrate_tools VALUES ('GalaxyTools', 'lib/galaxy/tool_shed/migrate', %d)" % 1 migrate_engine.execute( cmd ) except Exception, e: log.debug( "Creating migrate_tools table failed: %s" % str( e ) ) def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() try: MigrateTools_table.drop() except Exception, e: log.debug( "Dropping migrate_tools table failed: %s" % str( e ) )
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/model/migrate/versions/0092_add_migrate_tools_table.py
Python
gpl-3.0
1,469
[ "Galaxy" ]
2ed4bccf9d2f802a616ee80c26769d9fb545070e92999c815b8952801f1091ee
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import unicode_literals import os from unittest import TestCase import unittest2 as unittest from pymatgen.analysis.molecule_structure_comparator import \ MoleculeStructureComparator from pymatgen.core.structure import Molecule from pymatgen.io.qchem import QcOutput __author__ = 'xiaohuiqu' test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files", "molecules", "structural_change") class TestMoleculeStructureComparator(TestCase): def test_are_equal(self): msc1 = MoleculeStructureComparator() mol1 = Molecule.from_file(os.path.join(test_dir, "t1.xyz")) mol2 = Molecule.from_file(os.path.join(test_dir, "t2.xyz")) mol3 = Molecule.from_file(os.path.join(test_dir, "t3.xyz")) self.assertFalse(msc1.are_equal(mol1, mol2)) self.assertTrue(msc1.are_equal(mol2, mol3)) thio1 = Molecule.from_file(os.path.join(test_dir, "thiophene1.xyz")) thio2 = Molecule.from_file(os.path.join(test_dir, "thiophene2.xyz")) # noinspection PyProtectedMember msc2 = MoleculeStructureComparator( priority_bonds=msc1._get_bonds(thio1)) self.assertTrue(msc2.are_equal(thio1, thio2)) hal1 = Molecule.from_file(os.path.join(test_dir, "molecule_with_halogen_bonds_1.xyz")) hal2 = Molecule.from_file(os.path.join(test_dir, "molecule_with_halogen_bonds_2.xyz")) msc3 = MoleculeStructureComparator(priority_bonds=msc1._get_bonds(hal1)) self.assertTrue(msc3.are_equal(hal1, hal2)) def test_get_bonds(self): mol1 = Molecule.from_file(os.path.join(test_dir, "t1.xyz")) msc = MoleculeStructureComparator() # noinspection PyProtectedMember bonds = msc._get_bonds(mol1) bonds_ref = [(0, 1), (0, 2), (0, 3), (0, 23), (3, 4), (3, 5), (5, 6), (5, 7), (7, 8), (7, 9), (7, 21), (9, 10), (9, 11), (9, 12), (12, 13), (12, 14), (12, 15), (15, 16), (15, 17), (15, 18), (18, 19), (18, 20), (18, 21), (21, 22), (21, 23), (23, 24), (23, 25)] self.assertEqual(bonds, bonds_ref) mol2 = 
Molecule.from_file(os.path.join(test_dir, "MgBH42.xyz")) bonds = msc._get_bonds(mol2) self.assertEqual(bonds, [(1, 3), (2, 3), (3, 4), (3, 5), (6, 8), (7, 8), (8, 9), (8, 10)]) msc = MoleculeStructureComparator(ignore_ionic_bond=False) bonds = msc._get_bonds(mol2) self.assertEqual(bonds, [(0, 1), (0, 2), (0, 3), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (2, 3), (3, 4), (3, 5), (6, 8), (7, 8), (8, 9), (8, 10)]) mol1 = Molecule.from_file(os.path.join(test_dir, "molecule_with_halogen_bonds_1.xyz")) msc = MoleculeStructureComparator() # noinspection PyProtectedMember bonds = msc._get_bonds(mol1) self.assertEqual(bonds, [(0, 12), (0, 13), (0, 14), (0, 15), (1, 12), (1, 16), (1, 17), (1, 18), (2, 4), (2, 11), (2, 19), (3, 5), (3, 10), (3, 20), (4, 6), (4, 10), (5, 11), (5, 12), (6, 7), (6, 8), (6, 9)]) def test_to_and_from_dict(self): msc1 = MoleculeStructureComparator() d1 = msc1.as_dict() d2 = MoleculeStructureComparator.from_dict(d1).as_dict() self.assertEqual(d1, d2) thio1 = Molecule.from_file(os.path.join(test_dir, "thiophene1.xyz")) # noinspection PyProtectedMember msc2 = MoleculeStructureComparator( bond_length_cap=0.2, priority_bonds=msc1._get_bonds(thio1), priority_cap=0.5) d1 = msc2.as_dict() d2 = MoleculeStructureComparator.from_dict(d1).as_dict() self.assertEqual(d1, d2) def test_structural_change_in_geom_opt(self): qcout_path = os.path.join(test_dir, "mol_1_3_bond.qcout") qcout = QcOutput(qcout_path) mol1 = qcout.data[0]["molecules"][0] mol2 = qcout.data[0]["molecules"][-1] priority_bonds = [[0, 1], [0, 2], [1, 3], [1, 4], [1, 7], [2, 5], [2, 6], [2, 8], [4, 6], [4, 10], [6, 9]] msc = MoleculeStructureComparator(priority_bonds=priority_bonds) self.assertTrue(msc.are_equal(mol1, mol2)) def test_get_13_bonds(self): priority_bonds = [[0, 1], [0, 2], [1, 3], [1, 4], [1, 7], [2, 5], [2, 6], [2, 8], [4, 6], [4, 10], [6, 9]] bonds_13 = MoleculeStructureComparator.get_13_bonds(priority_bonds) ans = ((0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 2), (1, 
6), (1, 10), (2, 4), (2, 9), (3, 4), (3, 7), (4, 7), (4, 9), (5, 6), (5, 8), (6, 8), (6, 10)) self.assertEqual(bonds_13, tuple(ans)) if __name__ == '__main__': unittest.main()
aykol/pymatgen
pymatgen/analysis/tests/test_molecule_structure_comparator.py
Python
mit
4,991
[ "pymatgen" ]
13f327786f581c5f029cbe4f7e9f54df3fe7fea77ec1bb854917164d4ed45d7e
import numpy from sandbox.kernel.AbstractKernel import AbstractKernel from sandbox.util.Parameter import Parameter class GaussianKernel(AbstractKernel): """ A class to find gaussian kernel evaluations k(x, y) = exp (-||x - y||^2/2 sigma^2) """ def __init__(self, sigma=1.0): """ Initialise object with given value of sigma >= 0 :param sigma: kernel width parameter. :type sigma: :class:`float` """ self.setSigma(sigma) def evaluate(self, X1, X2): """ Find kernel evaluation between two matrices X1 and X2 whose rows are examples and have an identical number of columns. :param X1: First set of examples. :type X1: :class:`numpy.ndarray` :param X2: Second set of examples. :type X2: :class:`numpy.ndarray` """ Parameter.checkClass(X1, numpy.ndarray) Parameter.checkClass(X2, numpy.ndarray) if X1.shape[1] != X2.shape[1]: raise ValueError("Invalid matrix dimentions: " + str(X1.shape) + " " + str(X2.shape)) j1 = numpy.ones((X1.shape[0], 1)) j2 = numpy.ones((X2.shape[0], 1)) diagK1 = numpy.sum(X1**2, 1) diagK2 = numpy.sum(X2**2, 1) X1X2 = numpy.dot(X1, X2.T) Q = (2*X1X2 - numpy.outer(diagK1, j2) - numpy.outer(j1, diagK2) )/ (2*self.sigma**2) return numpy.exp(Q) def setSigma(self, sigma): """ Set the sigma parameter. :param sigma: kernel width parameter. :type sigma: :class:`float` """ Parameter.checkFloat(sigma, 0.0, float('inf')) if sigma == 0.0: raise ValueError("Sigma cannot be zero") self.sigma = sigma def __str__(self): return "GaussianKernel: sigma = " + str(self.sigma)
charanpald/sandbox
sandbox/kernel/GaussianKernel.py
Python
gpl-3.0
1,829
[ "Gaussian" ]
b39d3bc03a1a3383993e1303009e4c172326e4502e4fd01a8f3ce0f264ec3f10
""" Miscellaneous code to run benchmarks and operate on mpsrime files. """ from __future__ import print_function from __future__ import division import argparse import os.path import tempfile import time import msprime import Bio from Bio import Phylo import dendropy import ete2 import numpy as np def run_compress(args): tree_sequence = msprime.load(args.infile) tree_sequence.dump(args.outfile, zlib_compression=True) def run_mutate(args): tree_sequence = msprime.load(args.infile) tree_sequence.generate_mutations(args.mutation_rate, random_seed=1) tree_sequence.dump(args.outfile) print("Generated ", tree_sequence.get_num_mutations(), "mutations") def run_benchmark_trees(args): megabytes = 1024 * 1024 gigabytes = megabytes * 1024 terabytes = gigabytes * 1024 max_trees = args.num_trees tree_sequence = msprime.load(args.infile) file_size = os.path.getsize(args.infile) # Now benchmark the native reading before = time.clock() total_trees = 0 for t in tree_sequence.trees(): total_trees += 1 duration = time.clock() - before print("Read", total_trees, "trees in ", duration, "seconds") # Write out the first max_trees in Newick to a temporary file. 
with tempfile.NamedTemporaryFile() as f: num_trees = 0 for _, ns in tree_sequence.newick_trees(10): print(ns, file=f) num_trees += 1 if num_trees == max_trees: break f.flush() size = f.tell() estimated_size = size * total_trees / max_trees print( "Estimated total Newick size = ", estimated_size / terabytes, "TB") print("Appoximate difference = ", estimated_size / file_size) # BioPython f.seek(0) before = time.clock() num_trees = 0 for tree in Phylo.parse(f, "newick"): num_trees += 1 assert num_trees == max_trees avg_duration = (time.clock() - before) / max_trees print( "Read newick trees in appox {} seconds per tree with BioPython {}".format( avg_duration, Bio.__version__)) days = total_trees * avg_duration / (60 * 60 * 24) print("\tEstimated time is {} days".format(days)) # Dendropy f.seek(0) before = time.clock() num_trees = 0 for line in f: t = dendropy.Tree.get_from_string(line, schema="newick") num_trees += 1 assert num_trees == max_trees avg_duration = (time.clock() - before) / max_trees print( "Read newick trees in appox {} seconds per tree with Dendropy {}".format( avg_duration, dendropy.__version__)) # ETE f.seek(0) before = time.clock() num_trees = 0 for line in f: t = ete2.Tree(line) num_trees += 1 assert num_trees == max_trees avg_duration = (time.clock() - before) / max_trees print( "Read newick trees in appox {} seconds per tree with ETE {}".format( avg_duration, ete2.__version__)) def run_write_ped(args): tree_sequence = msprime.load(args.infile) # Write the ped file. 
s = tree_sequence.get_num_mutations() indices = np.arange(0, s) filename = "{}.ped".format(args.prefix) with open(filename, "w") as f: for j, h in enumerate(tree_sequence.haplotypes(), 1): family_id = j ind_id = j paternal_id = 0 maternal_id = 0 sex = 1 # 2 == case, 1 == control phenotype = 2 if j <= args.num_cases else 1 print( family_id, ind_id, paternal_id, maternal_id, sex, phenotype, end=" ", file=f) # Neat trick for converting numerical strings to numpy array: # interpret data as raw chars, and subtrace ascii code. nph = np.fromstring(h, np.int8) - ord('0') # Plink genotypes are 1/2, not 0/1 nph += 1 genotypes = np.zeros(4 * s, dtype="S1") genotypes[indices * 4] = nph genotypes[indices * 4 + 1] = " " genotypes[indices * 4 + 2] = nph genotypes[indices * 4 + 3] = " " genotypes.tofile(f) # Remove trailing space f.seek(-1, os.SEEK_CUR) print(file=f) # Make a map file. We're using haploid data, so we put it on the # male X chromosome. filename = "{}.map".format(args.prefix) with open(filename, "w") as f: for j in range(tree_sequence.get_num_mutations()): print("X", "rs{}".format(j), 0, j + 1, file=f) def run_gwas(args): tree_sequence = msprime.load(args.infile) num_cases = args.num_cases site = 0 n = tree_sequence.get_sample_size() cases = list(range(1, num_cases + 1)) with open(args.outfile, "w") as output: for tree in tree_sequence.trees(cases): for pos, node in tree.mutations(): num_leaves = tree.get_num_leaves(node) cases_with_mut = tree.get_num_tracked_leaves(node) controls_with_mut = tree.get_num_leaves(node) - cases_with_mut f_cases = cases_with_mut / num_cases f_controls = controls_with_mut / (n - num_cases) if num_leaves >= n / 2: # The mutation is the major allele a1 = 1 a2 = 2 fa = 1 - f_cases fu = 1 - f_controls else: # The mutation is the minor allele a1 = 2 a2 = 1 fa = f_cases fu = f_controls case_odds = fa / (1 - fa) control_odds = fu / (1 - fu) odds_ratio = "NA" if control_odds == 0 else case_odds / control_odds print( site + 1, a1, fa, fu, a2, 
odds_ratio, sep="\t", file=output) def main(): parser = argparse.ArgumentParser( description="Command line interface examples.") # This is required to get uniform behaviour in Python2 and Python3 subparsers = parser.add_subparsers(dest="subcommand") subparsers.required = True compress_parser = subparsers.add_parser( "compress", help="Compress the input and write to the output.") compress_parser.add_argument("infile") compress_parser.add_argument("outfile") compress_parser.set_defaults(runner=run_compress) mutate_parser = subparsers.add_parser( "mutate", help="Add mutations") mutate_parser.add_argument("infile") mutate_parser.add_argument("outfile") mutate_parser.add_argument( "--mutation-rate", "-u", type=float, default=0, help=( "The rate at which mutations occur within a single locus " "in units of 4N generations")) mutate_parser.set_defaults(runner=run_mutate) benchmark_trees_parser = subparsers.add_parser( "benchmark-trees", help="Benchmarks tree processing.") benchmark_trees_parser.add_argument("infile") benchmark_trees_parser.add_argument( "--num-trees", "-n", type=int, default=10) benchmark_trees_parser.set_defaults(runner=run_benchmark_trees) write_ped_parser = subparsers.add_parser( "write-ped", help="Converts history file to PED format.") write_ped_parser.add_argument("infile") write_ped_parser.add_argument("prefix") write_ped_parser.add_argument( "--num-cases", "-n", type=int, default=10) write_ped_parser.set_defaults(runner=run_write_ped) gwas_parser = subparsers.add_parser( "gwas", help="Runs an association test and writes the results to stdout") gwas_parser.add_argument("infile") gwas_parser.add_argument("outfile") gwas_parser.add_argument( "--num-cases", "-n", type=int, default=10) gwas_parser.set_defaults(runner=run_gwas) args = parser.parse_args() args.runner(args) if __name__ == "__main__": main()
jeromekelleher/msprime-paper
examples/examples.py
Python
gpl-3.0
8,175
[ "Biopython" ]
57c7a1990a7a9919c0eaea44674488798712244fb463d3c0345be2c4e27d931b
import os, sys, re import urllib2 from bs4 import BeautifulSoup class mineApps: def __init__(self, pagePath): self.page = pagePath #read the page provided during object creation @staticmethod def set_buffer(pageURL): soup = BeautifulSoup(urllib2.urlopen(pageURL).read()) return soup #set an object for the page given. put the details in that object def fetch_page_details(self): try : pager = mineApps.set_buffer(self.page) return (0, pager) except Exception as e: print sys.exc_info()[0], e return (-1, "Not able to fetch page details") #fetch all the app id urls from the above url def fetch_app_url(self, soup, show_more=False): try : #see more urls moreHref = '/store/recommended\?' seeMoreArray = [] #fetch app urls listed on each page linkArray = [] hrefVal = '/store/apps/details\?id=' soup.prettify() #fetcg tge links of all apps on the page for link in soup.find_all('a', href=re.compile(hrefVal)): #a tag, class name if link.get('href') not in linkArray : linkArray.append(link.get('href')) #fetch links of the show more fields and return them if show_more : print "coming inside if" for seeMore in soup.find_all('a', class_="see-more play-button small apps"): if seeMore.get('href') not in seeMoreArray : seeMoreArray.append(seeMore.get('href')) else : seeMoreArray = None #print linkArray return(0, linkArray, seeMoreArray) except Exception as e : print sys.exc_info()[0], e return(-1, "Not able to fetch app URL") #fetch the individual app details. 
def fetch_app_details(self, uri): try: #puts app_id, developer_id, app_name, app_rating, app_description, app category, app_icon_url app_url = "" app_id = app_name = app_rating = category = description = app_downloads = app_icon_url = None package_name = developer_id = None uri.strip() if "https://play.google.com" not in uri : app_url = "https://play.google.com" + uri else : app_url = uri detailer = mineApps.set_buffer(app_url) detailer.prettify() #app_id package name hrefVal = re.compile('/store/apps/details\?id=') app_id = hrefVal.sub('', app_url) #developer id hrefDev = '/store/apps/developer\?id=' dev_href= detailer.find('a', href=re.compile(hrefDev)) devIDURL = re.compile(hrefDev) developer_id = devIDURL.sub('', dev_href.get('href')) #author_f_name, author_l_name developerName = developer_id #author_f_name, author_l_name = developerName.split('+') #app name app_name = (detailer.find(itemprop="name", class_="document-title").text).strip() #app rating app_rating = (detailer.find(class_="score").text).strip() #app category category = "" # a bit of dance for games category game_cats = ["Action", "Adventure", "Arcade", "Board", "Card", "Casino", "Casual", "Educational", "Family", "Music", "Puzzle", "Racing", "Role Playing", "Simulation", "Sports", "Strategy", "Trivia", "Widgets", "Word"] catg = detailer.find(itemprop="genre").text if catg in game_cats : category = "Games" else : category = catg #app description description = detailer.find("div",class_="description").text #app icon url image = detailer.find(itemprop="image") app_icon_url = image["src"] #number of downloads app_downloads = (detailer.find("div",class_="stars-count").text).strip() #app version app_version = (detailer.find(itemprop="softwareVersion", class_="content").text).strip() #number of installations app_num_installs = (detailer.find(itemprop="numDownloads", class_="content").text).strip() #developer email mailHref = "mailto:" link_email = detailer.find('a', href=re.compile(mailHref), 
class_="dev-link") dev_email = (link_email.get('href')).replace('mailto:', '') #return all the details fetched jsonString = { "app_name" : app_name, "app_id" : app_id, "package_name": app_id, "developer_id": developer_id, "app_rating" : app_rating, "app_category": category, "app_icon_url": app_icon_url, "app_downloads": app_downloads, "description" : description, "app_version" : app_version, "app_num_installs": app_num_installs, "dev_email" : dev_email } #if None not in (app_id, app_name, app_rating, category, description, app_downloads, app_icon_url, developer_id) : return(0, jsonString) except Exception as e: print sys.exc_info()[0], e return (-1, "Not able to fetch anything from this page") #fetch the details of apps in the page in a search result. # Ex : search for ingress and fetch details of all apps related to ingress displayed on that page. def fetch_related_app_details(self, searchStr): try : uri = "https://play.google.com/store/search?q=" searchUrl = uri + '%20'.join(searchStr) searcher = mineApps(searchUrl) status, searcherObj = searcher.fetch_page_details() status, searchArray = searcher.fetch_app_url(searcherObj) return (status, searchArray) except Exception as e : print sys.exc_info()[0], e return(-1, "not able to fetch related app details") fetch_page_details = mineApps.fetch_page_details fetch_app_url = mineApps.fetch_app_url fetch_app_details = mineApps.fetch_app_details fetch_related_app_details = mineApps.fetch_related_app_details
bharathkallurs/appsminer
appMiner.py
Python
bsd-3-clause
5,373
[ "CASINO" ]
766a201ad43f4286b868e9ed88a08c30c80490881ed184e2247f6ea41b919062
import pickle import gzip import os, sys, errno import time import math # numpy & theano imports need to be done in this order (only for some numpy installations, not sure why) import numpy # we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself import numpy.distutils.__config__ # and only after that can we import theano import theano from utils.providers import ListDataProviderWithProjectionIndex, expand_projection_inputs, get_unexpanded_projection_inputs # ListDataProvider from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation from frontend.silence_remover import SilenceRemover from frontend.silence_remover import trim_silence from frontend.min_max_norm import MinMaxNormalisation #from frontend.acoustic_normalisation import CMPNormalisation from frontend.acoustic_composition import AcousticComposition from frontend.parameter_generation import ParameterGeneration #from frontend.feature_normalisation_base import FeatureNormBase from frontend.mean_variance_norm import MeanVarianceNorm # the new class for label composition and normalisation from frontend.label_composer import LabelComposer import configuration from models.dnn import DNN from models.tpdnn import TokenProjectionDNN from models.ms_dnn import MultiStreamDNN from models.ms_dnn_gv import MultiStreamDNNGv from models.sdae import StackedDenoiseAutoEncoder from utils.compute_distortion import DistortionComputation, IndividualDistortionComp from utils.generate import generate_wav from utils.learn_rates import ExpDecreaseLearningRate #import matplotlib.pyplot as plt # our custom logging class that can also plot #from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot import logging # as logging import logging.config import io ## This should always be True -- tidy up later expand_by_minibatch = 
True if expand_by_minibatch: proj_type = 'int32' else: proj_type = theano.config.floatX def extract_file_id_list(file_list): file_id_list = [] for file_name in file_list: file_id = os.path.basename(os.path.splitext(file_name)[0]) file_id_list.append(file_id) return file_id_list def read_file_list(file_name): logger = logging.getLogger("read_file_list") file_lists = [] fid = open(file_name) for line in fid.readlines(): line = line.strip() if len(line) < 1: continue file_lists.append(line) fid.close() logger.debug('Read file list from %s' % file_name) return file_lists def make_output_file_list(out_dir, in_file_lists): out_file_lists = [] for in_file_name in in_file_lists: file_id = os.path.basename(in_file_name) out_file_name = out_dir + '/' + file_id out_file_lists.append(out_file_name) return out_file_lists def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True): if not os.path.exists(file_dir) and new_dir_switch: os.makedirs(file_dir) file_name_list = [] for file_id in file_id_list: file_name = file_dir + '/' + file_id + file_extension file_name_list.append(file_name) return file_name_list def visualize_dnn(dnn): layer_num = len(dnn.params) / 2 ## including input and output for i in range(layer_num): fig_name = 'Activation weights W' + str(i) fig_title = 'Activation weights of W' + str(i) xlabel = 'Neuron index of hidden layer ' + str(i) ylabel = 'Neuron index of hidden layer ' + str(i+1) if i == 0: xlabel = 'Input feature index' if i == layer_num-1: ylabel = 'Output feature index' logger.create_plot(fig_name, SingleWeightMatrixPlot) plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T) plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel) ## Function for training projection and non-projection parts at same time def train_DNN(train_xy_file_list, valid_xy_file_list, \ nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False): # get loggers for this 
function # this one writes to both console and file logger = logging.getLogger("main.train_DNN") logger.debug('Starting train_DNN') if plot: # this one takes care of plotting duties plotlogger = logging.getLogger("plotting") # create an (empty) plot of training convergence, ready to receive data points logger.create_plot('training convergence',MultipleSeriesPlot) try: assert numpy.sum(ms_outs) == n_outs except AssertionError: logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs)) raise ####parameters##### finetune_lr = float(hyper_params['learning_rate']) training_epochs = int(hyper_params['training_epochs']) batch_size = int(hyper_params['batch_size']) l1_reg = float(hyper_params['l1_reg']) l2_reg = float(hyper_params['l2_reg']) private_l2_reg = float(hyper_params['private_l2_reg']) warmup_epoch = int(hyper_params['warmup_epoch']) momentum = float(hyper_params['momentum']) warmup_momentum = float(hyper_params['warmup_momentum']) hidden_layers_sizes = hyper_params['hidden_layers_sizes'] stream_weights = hyper_params['stream_weights'] private_hidden_sizes = hyper_params['private_hidden_sizes'] buffer_utt_size = buffer_size early_stop_epoch = int(hyper_params['early_stop_epochs']) hidden_activation = hyper_params['hidden_activation'] output_activation = hyper_params['output_activation'] stream_lr_weights = hyper_params['stream_lr_weights'] use_private_hidden = hyper_params['use_private_hidden'] model_type = hyper_params['model_type'] index_to_project = hyper_params['index_to_project'] projection_insize = hyper_params['projection_insize'] projection_outsize = hyper_params['projection_outsize'] ## use a switch to turn on pretraining ## pretraining may not help too much, if this case, we turn it off to save time do_pretraining = hyper_params['do_pretraining'] pretraining_epochs = int(hyper_params['pretraining_epochs']) pretraining_lr = float(hyper_params['pretraining_lr']) initial_projection_distrib = 
hyper_params['initial_projection_distrib'] buffer_size = int(buffer_size / batch_size) * batch_size ################### (train_x_file_list, train_y_file_list) = train_xy_file_list (valid_x_file_list, valid_y_file_list) = valid_xy_file_list logger.debug('Creating training data provider') train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch) logger.debug('Creating validation data provider') valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch) shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection() train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection() valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy train_data_reader.reset() valid_data_reader.reset() ##temporally we use the training set as pretrain_set_x. ##we need to support any data for pretraining pretrain_set_x = train_set_x # numpy random generator numpy_rng = numpy.random.RandomState(123) logger.info('building the model') dnn_model = None pretrain_fn = None ## not all the model support pretraining right now train_fn = None valid_fn = None valid_model = None ## valid_fn and valid_model are the same. 
reserve to computer multi-stream distortion if model_type == 'DNN': dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, hidden_activation = hidden_activation, output_activation = output_activation) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size) elif model_type == 'TPDNN': dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, hidden_activation = hidden_activation, output_activation = output_activation, projection_insize=projection_insize, projection_outsize=projection_outsize, expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib) train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \ dnn_model.build_finetune_functions( (train_set_x, train_set_x_proj, train_set_y), (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size) elif model_type == 'SDAE': ##basic model is ready. ##if corruption levels is set to zero. it becomes normal autoencoder dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes) if do_pretraining: pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size) elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised. 
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, stream_weights = stream_weights, hidden_activation = hidden_activation, output_activation = output_activation) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size, lr_weights = stream_lr_weights) elif model_type == 'MSDNN_GV': ## not fully ready dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, stream_weights = stream_weights, hidden_activation = hidden_activation, output_activation = output_activation) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size, lr_weights = stream_lr_weights) else: logger.critical('%s type NN model is not supported!' %(model_type)) raise ## if pretraining is supported in one model, add the switch here ## be careful to use autoencoder for pretraining here: ## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1] ## however, tanh works better and converge fast in finetuning ## ## Will extend this soon... 
if do_pretraining and model_type == 'SDAE': logger.info('pretraining the %s model' %(model_type)) corruption_level = 0.0 ## in SDAE we do layer-wise pretraining using autoencoders for i in range(dnn_model.n_layers): for epoch in range(pretraining_epochs): sub_start_time = time.clock() pretrain_loss = [] while (not train_data_reader.is_finish()): shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition() pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True) n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size for batch_index in range(n_train_batches): pretrain_loss.append(pretraining_fn[i](index=batch_index, corruption=corruption_level, learning_rate=pretraining_lr)) sub_end_time = time.clock() logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time))) train_data_reader.reset() logger.info('fine-tuning the %s model' %(model_type)) start_time = time.clock() best_dnn_model = dnn_model best_validation_loss = sys.float_info.max previous_loss = sys.float_info.max early_stop = 0 epoch = 0 previous_finetune_lr = finetune_lr while (epoch < training_epochs): epoch = epoch + 1 current_momentum = momentum current_finetune_lr = finetune_lr if epoch <= warmup_epoch: current_finetune_lr = finetune_lr current_momentum = warmup_momentum else: current_finetune_lr = previous_finetune_lr * 0.5 previous_finetune_lr = current_finetune_lr train_error = [] sub_start_time = time.clock() while (not train_data_reader.is_finish()): shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection() train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True) train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True) train_set_y.set_value(numpy.asarray(temp_train_set_y, 
dtype=theano.config.floatX), borrow=True) n_train_batches = train_set_x.get_value().shape[0] / batch_size logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) ) for minibatch_index in range(n_train_batches): this_train_error = train_all_fn(minibatch_index, current_finetune_lr, current_momentum) train_error.append(this_train_error) if numpy.isnan(this_train_error): logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) ) train_data_reader.reset() ## osw -- getting validation error from a forward pass in a single batch ## exausts memory when using 20k projected vocab -- also use minibatches logger.debug('calculating validation loss') valid_error = [] n_valid_batches = valid_set_x.get_value().shape[0] / batch_size for minibatch_index in range(n_valid_batches): v_loss = valid_score_i(minibatch_index) valid_error.append(v_loss) this_validation_loss = numpy.mean(valid_error) # this has a possible bias if the minibatches were not all of identical size # but it should not be siginficant if minibatches are small this_train_valid_loss = numpy.mean(train_error) sub_end_time = time.clock() loss_difference = this_validation_loss - previous_loss logger.info('BASIC epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time))) if plot: plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss)) plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss)) plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error') if this_validation_loss < best_validation_loss: best_dnn_model = dnn_model best_validation_loss = this_validation_loss logger.debug('validation loss decreased, so saving model') early_stop 
= 0 else: logger.debug('validation loss did not improve') dbn = best_dnn_model early_stop += 1 if early_stop > early_stop_epoch: # too many consecutive epochs without surpassing the best model logger.debug('stopping early') break if math.isnan(this_validation_loss): break previous_loss = this_validation_loss ### Save projection values: if cfg.hyper_params['model_type'] == 'TPDNN': if not os.path.isdir(cfg.projection_weights_output_dir): os.mkdir(cfg.projection_weights_output_dir) weights = dnn_model.get_projection_weights() fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch)) numpy.savetxt(fname, weights) end_time = time.clock() pickle.dump(best_dnn_model, open(nnets_file_name, 'wb')) logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss)) if plot: plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error') ## Function for training all model on train data as well as simultaneously ## inferring proj weights on dev data. 
# in each epoch do: # train_all_fn() # infer_projections_fn() ## <-- updates proj for devset and gives validation loss def train_DNN_and_traindev_projections(train_xy_file_list, valid_xy_file_list, \ nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False): # get loggers for this function # this one writes to both console and file logger = logging.getLogger("main.train_DNN") logger.debug('Starting train_DNN') if plot: # this one takes care of plotting duties plotlogger = logging.getLogger("plotting") # create an (empty) plot of training convergence, ready to receive data points logger.create_plot('training convergence',MultipleSeriesPlot) try: assert numpy.sum(ms_outs) == n_outs except AssertionError: logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs)) raise ####parameters##### finetune_lr = float(hyper_params['learning_rate']) training_epochs = int(hyper_params['training_epochs']) batch_size = int(hyper_params['batch_size']) l1_reg = float(hyper_params['l1_reg']) l2_reg = float(hyper_params['l2_reg']) private_l2_reg = float(hyper_params['private_l2_reg']) warmup_epoch = int(hyper_params['warmup_epoch']) momentum = float(hyper_params['momentum']) warmup_momentum = float(hyper_params['warmup_momentum']) hidden_layers_sizes = hyper_params['hidden_layers_sizes'] stream_weights = hyper_params['stream_weights'] private_hidden_sizes = hyper_params['private_hidden_sizes'] buffer_utt_size = buffer_size early_stop_epoch = int(hyper_params['early_stop_epochs']) hidden_activation = hyper_params['hidden_activation'] output_activation = hyper_params['output_activation'] stream_lr_weights = hyper_params['stream_lr_weights'] use_private_hidden = hyper_params['use_private_hidden'] model_type = hyper_params['model_type'] index_to_project = hyper_params['index_to_project'] projection_insize = hyper_params['projection_insize'] projection_outsize = hyper_params['projection_outsize'] ## use a switch to turn on pretraining ## 
pretraining may not help too much, if this case, we turn it off to save time do_pretraining = hyper_params['do_pretraining'] pretraining_epochs = int(hyper_params['pretraining_epochs']) pretraining_lr = float(hyper_params['pretraining_lr']) initial_projection_distrib = hyper_params['initial_projection_distrib'] buffer_size = int(buffer_size / batch_size) * batch_size ################### (train_x_file_list, train_y_file_list) = train_xy_file_list (valid_x_file_list, valid_y_file_list) = valid_xy_file_list logger.debug('Creating training data provider') train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch) logger.debug('Creating validation data provider') valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch) shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection() train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection() valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy train_data_reader.reset() valid_data_reader.reset() ##temporally we use the training set as pretrain_set_x. 
##we need to support any data for pretraining pretrain_set_x = train_set_x # numpy random generator numpy_rng = numpy.random.RandomState(123) logger.info('building the model') dnn_model = None pretrain_fn = None ## not all the model support pretraining right now train_fn = None valid_fn = None valid_model = None ## valid_fn and valid_model are the same. reserve to computer multi-stream distortion if model_type == 'DNN': dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, hidden_activation = hidden_activation, output_activation = output_activation) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size) elif model_type == 'TPDNN': dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, hidden_activation = hidden_activation, output_activation = output_activation, projection_insize=projection_insize, projection_outsize=projection_outsize, expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib) train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \ dnn_model.build_finetune_functions( (train_set_x, train_set_x_proj, train_set_y), (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size) elif model_type == 'SDAE': ##basic model is ready. ##if corruption levels is set to zero. 
it becomes normal autoencoder dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes) if do_pretraining: pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size) elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised. dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, stream_weights = stream_weights, hidden_activation = hidden_activation, output_activation = output_activation) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size, lr_weights = stream_lr_weights) elif model_type == 'MSDNN_GV': ## not fully ready dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs, l1_reg = l1_reg, l2_reg = l2_reg, hidden_layers_sizes = hidden_layers_sizes, stream_weights = stream_weights, hidden_activation = hidden_activation, output_activation = output_activation) train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size, lr_weights = stream_lr_weights) else: logger.critical('%s type NN model is not supported!' %(model_type)) raise ## if pretraining is supported in one model, add the switch here ## be careful to use autoencoder for pretraining here: ## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1] ## however, tanh works better and converge fast in finetuning ## ## Will extend this soon... 
if do_pretraining and model_type == 'SDAE': logger.info('pretraining the %s model' %(model_type)) corruption_level = 0.0 ## in SDAE we do layer-wise pretraining using autoencoders for i in range(dnn_model.n_layers): for epoch in range(pretraining_epochs): sub_start_time = time.clock() pretrain_loss = [] while (not train_data_reader.is_finish()): shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition() pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True) n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size for batch_index in range(n_train_batches): pretrain_loss.append(pretraining_fn[i](index=batch_index, corruption=corruption_level, learning_rate=pretraining_lr)) sub_end_time = time.clock() logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time))) train_data_reader.reset() logger.info('fine-tuning the %s model' %(model_type)) start_time = time.clock() best_dnn_model = dnn_model best_validation_loss = sys.float_info.max previous_loss = sys.float_info.max early_stop = 0 epoch = 0 previous_finetune_lr = finetune_lr ##dnn_model.zero_projection_weights() while (epoch < training_epochs): epoch = epoch + 1 current_momentum = momentum current_finetune_lr = finetune_lr if epoch <= warmup_epoch: current_finetune_lr = finetune_lr current_momentum = warmup_momentum else: current_finetune_lr = previous_finetune_lr * 0.5 previous_finetune_lr = current_finetune_lr train_error = [] sub_start_time = time.clock() while (not train_data_reader.is_finish()): shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection() train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True) train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True) 
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True) n_train_batches = train_set_x.get_value().shape[0] / batch_size logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) ) for minibatch_index in range(n_train_batches): this_train_error = train_all_fn(minibatch_index, current_finetune_lr, current_momentum) train_error.append(this_train_error) if numpy.isnan(this_train_error): logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) ) train_data_reader.reset() ## infer validation weights before getting validation error: ## osw -- inferring word reps on validation set in a forward pass in a single batch ## exausts memory when using 20k projected vocab -- also use minibatches logger.debug('infer word representations for validation set') valid_error = [] n_valid_batches = valid_set_x.get_value().shape[0] / batch_size for minibatch_index in range(n_valid_batches): v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum) valid_error.append(v_loss) ## this function also give us validation loss: this_validation_loss = numpy.mean(valid_error) ''' ## osw -- getting validation error from a forward pass in a single batch ## exausts memory when using 20k projected vocab -- also use minibatches logger.debug('calculating validation loss') valid_error = [] n_valid_batches = valid_set_x.get_value().shape[0] / batch_size for minibatch_index in xrange(n_valid_batches): v_loss = valid_score_i(minibatch_index) valid_error.append(v_loss) this_validation_loss = numpy.mean(valid_error) ''' # this has a possible bias if the minibatches were not all of identical size # but it should not be siginficant if minibatches are small this_train_valid_loss = numpy.mean(train_error) sub_end_time = time.clock() loss_difference = this_validation_loss - previous_loss 
logger.info('BASIC epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time))) if plot: plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss)) plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss)) plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error') if this_validation_loss < best_validation_loss: best_dnn_model = dnn_model best_validation_loss = this_validation_loss logger.debug('validation loss decreased, so saving model') early_stop = 0 else: logger.debug('validation loss did not improve') dbn = best_dnn_model early_stop += 1 if early_stop > early_stop_epoch: # too many consecutive epochs without surpassing the best model logger.debug('stopping early') break if math.isnan(this_validation_loss): break previous_loss = this_validation_loss ### Save projection values: if cfg.hyper_params['model_type'] == 'TPDNN': if not os.path.isdir(cfg.projection_weights_output_dir): os.mkdir(cfg.projection_weights_output_dir) weights = dnn_model.get_projection_weights() fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch)) numpy.savetxt(fname, weights) end_time = time.clock() pickle.dump(best_dnn_model, open(nnets_file_name, 'wb')) logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss)) if plot: plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error') ## Function for training the non-projection part only def train_basic_DNN(train_xy_file_list, valid_xy_file_list, \ nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False): # get loggers for this function # this one writes to both console and file logger = logging.getLogger("main.train_DNN") 
    logger.debug('Starting train_DNN')

    if plot:
        # this one takes care of plotting duties
        plotlogger = logging.getLogger("plotting")
        # create an (empty) plot of training convergence, ready to receive data points
        logger.create_plot('training convergence',MultipleSeriesPlot)

    # sanity check: the multi-stream output sizes must sum to the total output size
    try:
        assert numpy.sum(ms_outs) == n_outs
    except AssertionError:
        logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
        raise

    ####parameters#####
    finetune_lr = float(hyper_params['learning_rate'])
    training_epochs = int(hyper_params['training_epochs'])
    batch_size = int(hyper_params['batch_size'])
    l1_reg = float(hyper_params['l1_reg'])
    l2_reg = float(hyper_params['l2_reg'])
    private_l2_reg = float(hyper_params['private_l2_reg'])
    warmup_epoch = int(hyper_params['warmup_epoch'])
    momentum = float(hyper_params['momentum'])
    warmup_momentum = float(hyper_params['warmup_momentum'])

    hidden_layers_sizes = hyper_params['hidden_layers_sizes']

    stream_weights = hyper_params['stream_weights']
    private_hidden_sizes = hyper_params['private_hidden_sizes']

    buffer_utt_size = buffer_size
    early_stop_epoch = int(hyper_params['early_stop_epochs'])

    hidden_activation = hyper_params['hidden_activation']
    output_activation = hyper_params['output_activation']

    stream_lr_weights = hyper_params['stream_lr_weights']
    use_private_hidden = hyper_params['use_private_hidden']

    model_type = hyper_params['model_type']

    # projection (word-representation) settings, used only by TPDNN
    index_to_project = hyper_params['index_to_project']
    projection_insize = hyper_params['projection_insize']
    projection_outsize = hyper_params['projection_outsize']

    ## use a switch to turn on pretraining
    ## pretraining may not help too much, if this case, we turn it off to save time
    do_pretraining = hyper_params['do_pretraining']
    pretraining_epochs = int(hyper_params['pretraining_epochs'])
    pretraining_lr = float(hyper_params['pretraining_lr'])

    initial_projection_distrib = hyper_params['initial_projection_distrib']

    # round the buffer size down to a whole number of minibatches
    buffer_size = int(buffer_size / batch_size) * batch_size

    ###################

    (train_x_file_list, train_y_file_list) = train_xy_file_list
    (valid_x_file_list, valid_y_file_list) = valid_xy_file_list

    logger.debug('Creating training data provider')
    # NOTE(review): expand_by_minibatch is not defined in this function -- presumably a
    # module-level global set elsewhere in the file; verify.
    train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)

    logger.debug('Creating validation data provider')
    valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)

    # load the first partition into Theano shared variables; the raw (temp_*) copies are unused here
    shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
    train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
    shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
    valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
    train_data_reader.reset()
    valid_data_reader.reset()


    ##temporally we use the training set as pretrain_set_x.
    ##we need to support any data for pretraining
    pretrain_set_x = train_set_x

    # numpy random generator (fixed seed for reproducible initialisation)
    numpy_rng = numpy.random.RandomState(123)
    logger.info('building the model')

    dnn_model = None
    pretrain_fn = None  ## not all the model support pretraining right now
    train_fn = None
    valid_fn = None
    valid_model = None  ## valid_fn and valid_model are the same.
    ## reserve to computer multi-stream distortion

    # build the requested model type and compile its Theano train/valid functions
    if model_type == 'DNN':
        dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
                        l1_reg = l1_reg, l2_reg = l2_reg,
                        hidden_layers_sizes = hidden_layers_sizes,
                        hidden_activation = hidden_activation,
                        output_activation = output_activation)
        train_fn, valid_fn = dnn_model.build_finetune_functions(
                    (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)

    elif model_type == 'TPDNN':
        # token-projection DNN: learns a projection (word representation) alongside the net
        dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
                        l1_reg = l1_reg, l2_reg = l2_reg,
                        hidden_layers_sizes = hidden_layers_sizes,
                        hidden_activation = hidden_activation,
                        output_activation = output_activation,
                        projection_insize=projection_insize, projection_outsize=projection_outsize,
                        expand_by_minibatch=expand_by_minibatch,
                        initial_projection_distrib=initial_projection_distrib)
        train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
                    dnn_model.build_finetune_functions(
                    (train_set_x, train_set_x_proj, train_set_y),
                    (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)

    elif model_type == 'SDAE':
        ##basic model is ready.
        ##if corruption levels is set to zero. it becomes normal autoencoder
        dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
                        l1_reg = l1_reg, l2_reg = l2_reg,
                        hidden_layers_sizes = hidden_layers_sizes)

        if do_pretraining:
            pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)

        train_fn, valid_fn = dnn_model.build_finetune_functions(
                    (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)

    elif model_type == 'MSDNN':
        ##model is ready, but the hyper-parameters are not optimised.
        dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
                        l1_reg = l1_reg, l2_reg = l2_reg,
                        hidden_layers_sizes = hidden_layers_sizes,
                        stream_weights = stream_weights,
                        hidden_activation = hidden_activation,
                        output_activation = output_activation)

        train_fn, valid_fn = dnn_model.build_finetune_functions(
                    (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size,
                    lr_weights = stream_lr_weights)

    elif model_type == 'MSDNN_GV':  ## not fully ready
        dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
                        l1_reg = l1_reg, l2_reg = l2_reg,
                        hidden_layers_sizes = hidden_layers_sizes,
                        stream_weights = stream_weights,
                        hidden_activation = hidden_activation,
                        output_activation = output_activation)

        train_fn, valid_fn = dnn_model.build_finetune_functions(
                    (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size,
                    lr_weights = stream_lr_weights)

    else:
        logger.critical('%s type NN model is not supported!' %(model_type))
        # NOTE(review): bare 'raise' outside an except block raises RuntimeError
        # ("No active exception to re-raise"); probably intended to raise a specific error.
        raise

    ## if pretraining is supported in one model, add the switch here
    ## be careful to use autoencoder for pretraining here:
    ## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
    ## however, tanh works better and converge fast in finetuning
    ##
    ## Will extend this soon...
if do_pretraining and model_type == 'SDAE': logger.info('pretraining the %s model' %(model_type)) corruption_level = 0.0 ## in SDAE we do layer-wise pretraining using autoencoders for i in range(dnn_model.n_layers): for epoch in range(pretraining_epochs): sub_start_time = time.clock() pretrain_loss = [] while (not train_data_reader.is_finish()): shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition() pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True) n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size for batch_index in range(n_train_batches): pretrain_loss.append(pretraining_fn[i](index=batch_index, corruption=corruption_level, learning_rate=pretraining_lr)) sub_end_time = time.clock() logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time))) train_data_reader.reset() logger.info('fine-tuning the %s model' %(model_type)) start_time = time.clock() best_dnn_model = dnn_model best_validation_loss = sys.float_info.max previous_loss = sys.float_info.max early_stop = 0 epoch = 0 previous_finetune_lr = finetune_lr dnn_model.zero_projection_weights() while (epoch < training_epochs): epoch = epoch + 1 current_momentum = momentum current_finetune_lr = finetune_lr if epoch <= warmup_epoch: current_finetune_lr = finetune_lr current_momentum = warmup_momentum else: current_finetune_lr = previous_finetune_lr * 0.5 previous_finetune_lr = current_finetune_lr train_error = [] sub_start_time = time.clock() while (not train_data_reader.is_finish()): shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection() train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True) train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True) 
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True) n_train_batches = train_set_x.get_value().shape[0] / batch_size logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) ) for minibatch_index in range(n_train_batches): this_train_error = train_subword_fn(minibatch_index, current_finetune_lr, current_momentum) train_error.append(this_train_error) if numpy.isnan(this_train_error): logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) ) train_data_reader.reset() ## osw -- getting validation error from a forward pass in a single batch ## exausts memory when using 20k projected vocab -- also use minibatches logger.debug('calculating validation loss') valid_error = [] n_valid_batches = valid_set_x.get_value().shape[0] / batch_size for minibatch_index in range(n_valid_batches): v_loss = valid_score_i(minibatch_index) valid_error.append(v_loss) this_validation_loss = numpy.mean(valid_error) # this has a possible bias if the minibatches were not all of identical size # but it should not be siginficant if minibatches are small this_train_valid_loss = numpy.mean(train_error) sub_end_time = time.clock() loss_difference = this_validation_loss - previous_loss logger.info('BASIC epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time))) if plot: plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss)) plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss)) plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error') if this_validation_loss < best_validation_loss: best_dnn_model = dnn_model best_validation_loss = this_validation_loss 
            logger.debug('validation loss decreased, so saving model')
            early_stop = 0
        else:
            logger.debug('validation loss did not improve')
            dbn = best_dnn_model
            early_stop += 1

        if early_stop > early_stop_epoch:
            # too many consecutive epochs without surpassing the best model
            logger.debug('stopping early')
            break

        if math.isnan(this_validation_loss):
            break

        previous_loss = this_validation_loss

        ### Save projection values:
        if cfg.hyper_params['model_type'] == 'TPDNN':
            if not os.path.isdir(cfg.projection_weights_output_dir):
                os.mkdir(cfg.projection_weights_output_dir)
            weights = dnn_model.get_projection_weights()
            fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
            numpy.savetxt(fname, weights)

    end_time = time.clock()
    # NOTE(review): file handle from open() is never closed explicitly; consider 'with open(...)'.
    pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))

    logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))

    if plot:
        plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')


### ========== now train the word residual ============

def train_DNN_with_projections(train_xy_file_list, valid_xy_file_list, \
          nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
    """TOKEN phase: load the previously pickled TPDNN from nnets_file_name,
    re-initialise the projection weights, and train only the word
    (projection) parameters via train_word_fn.  The updated model is
    pickled back to nnets_file_name after every epoch's loop finishes.
    """
    ####parameters#####
    finetune_lr = float(hyper_params['learning_rate'])
    training_epochs = int(hyper_params['training_epochs'])
    batch_size = int(hyper_params['batch_size'])
    l1_reg = float(hyper_params['l1_reg'])
    l2_reg = float(hyper_params['l2_reg'])
    private_l2_reg = float(hyper_params['private_l2_reg'])
    warmup_epoch = int(hyper_params['warmup_epoch'])
    momentum = float(hyper_params['momentum'])
    warmup_momentum = float(hyper_params['warmup_momentum'])

    hidden_layers_sizes = hyper_params['hidden_layers_sizes']

    stream_weights = hyper_params['stream_weights']
    private_hidden_sizes = hyper_params['private_hidden_sizes']

    buffer_utt_size = buffer_size
    early_stop_epoch = int(hyper_params['early_stop_epochs'])

    hidden_activation = hyper_params['hidden_activation']
    output_activation = hyper_params['output_activation']

    stream_lr_weights = hyper_params['stream_lr_weights']
    use_private_hidden = hyper_params['use_private_hidden']

    model_type = hyper_params['model_type']

    index_to_project = hyper_params['index_to_project']
    projection_insize = hyper_params['projection_insize']
    projection_outsize = hyper_params['projection_outsize']

    ######### data providers ##########
    (train_x_file_list, train_y_file_list) = train_xy_file_list
    (valid_x_file_list, valid_y_file_list) = valid_xy_file_list

    # NOTE(review): 'logger' is not created in this function -- presumably a module-level
    # logger exists; verify (train_basic_DNN creates its own local one).
    logger.debug('Creating training data provider')
    train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)

    logger.debug('Creating validation data provider')
    valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)

    shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
    train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
    shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
    valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
    train_data_reader.reset()
    valid_data_reader.reset()
    ####################################

    # numpy random generator
    numpy_rng = numpy.random.RandomState(123)

    logger.info('building the model')

    ############## load existing dnn #####
    # NOTE(review): handle from open() not closed explicitly; consider 'with open(...)'.
    dnn_model = pickle.load(open(nnets_file_name, 'rb'))
    train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
                    dnn_model.build_finetune_functions(
                    (train_set_x, train_set_x_proj, train_set_y),
                    (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
    ####################################

    logger.info('fine-tuning the %s model' %(model_type))

    start_time = time.clock()

    best_dnn_model = dnn_model
    best_validation_loss = sys.float_info.max
    previous_loss = sys.float_info.max

    early_stop = 0
    epoch = 0
    previous_finetune_lr = finetune_lr

    # start the word-residual phase from freshly initialised projection weights
    dnn_model.initialise_projection_weights()

    all_epochs = 20  ## 100 ## <-------- hard coded !!!!!!!!!!
    current_finetune_lr = previous_finetune_lr = finetune_lr
    warmup_epoch_2 = 10  # 10 ## <-------- hard coded !!!!!!!!!!

    while (epoch < all_epochs):
        epoch = epoch + 1

        current_momentum = momentum

        # halve the learning rate each epoch once past the warm-up period
        if epoch > warmup_epoch_2:
            previous_finetune_lr = current_finetune_lr
            current_finetune_lr = previous_finetune_lr * 0.5

        train_error = []
        sub_start_time = time.clock()

        while (not train_data_reader.is_finish()):
            shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
            train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
            train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
            train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)

            # NOTE(review): '/' vs '//' -- float under Python 3, range() below would raise.
            n_train_batches = train_set_x.get_value().shape[0] / batch_size

            logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )

            for minibatch_index in range(n_train_batches):
                # TOKEN phase updates only the word-projection parameters
                this_train_error = train_word_fn(minibatch_index, current_finetune_lr, current_momentum)
                train_error.append(this_train_error)

                if numpy.isnan(this_train_error):
                    logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )

        train_data_reader.reset()

        ### COULD REMOVE
THIS LATER ## osw -- getting validation error from a forward pass in a single batch ## exausts memory when using 20k projected vocab -- also use minibatches logger.debug('calculating validation loss') valid_error = [] n_valid_batches = valid_set_x.get_value().shape[0] / batch_size for minibatch_index in range(n_valid_batches): v_loss = valid_score_i(minibatch_index) valid_error.append(v_loss) this_validation_loss = numpy.mean(valid_error) # this has a possible bias if the minibatches were not all of identical size # but it should not be siginficant if minibatches are small this_train_valid_loss = numpy.mean(train_error) # if plot: # ## add dummy validation loss so that plot works: # plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss)) # plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss)) # sub_end_time = time.clock() logger.info('TOKEN epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time))) if cfg.hyper_params['model_type'] == 'TPDNN': if not os.path.isdir(cfg.projection_weights_output_dir): os.mkdir(cfg.projection_weights_output_dir) weights = dnn_model.get_projection_weights() fname = os.path.join(cfg.projection_weights_output_dir, 'proj_TOKEN_epoch_%s'%(epoch)) numpy.savetxt(fname, weights) best_dnn_model = dnn_model ## always update end_time = time.clock() pickle.dump(best_dnn_model, open(nnets_file_name, 'wb')) logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss)) # if plot: # plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error') # ### ======================================================== ### ========== now infer word represntations for out-of-training (dev) data ============ # # ### TEMP-- restarted!!! 
### ~~~~~~~ (commented-out scratch code from a restarted run -- kept for reference)
#     epoch = 50
#     dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
#     train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
#             dnn_model.build_finetune_functions(
#                     (train_set_x, train_set_x_proj, train_set_y),
#                     (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
#     this_train_valid_loss = 198.0 ## approx value
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


def infer_projections(train_xy_file_list, valid_xy_file_list, \
          nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
    """INFERENCE phase: load the trained TPDNN from nnets_file_name and infer
    projection (word) representations for held-out data by running
    infer_projections_fn over the validation set; the model (with inferred
    projections) is pickled back to nnets_file_name each run.
    """
    ####parameters#####
    finetune_lr = float(hyper_params['learning_rate'])
    training_epochs = int(hyper_params['training_epochs'])
    batch_size = int(hyper_params['batch_size'])
    l1_reg = float(hyper_params['l1_reg'])
    l2_reg = float(hyper_params['l2_reg'])
    private_l2_reg = float(hyper_params['private_l2_reg'])
    warmup_epoch = int(hyper_params['warmup_epoch'])
    momentum = float(hyper_params['momentum'])
    warmup_momentum = float(hyper_params['warmup_momentum'])

    hidden_layers_sizes = hyper_params['hidden_layers_sizes']

    stream_weights = hyper_params['stream_weights']
    private_hidden_sizes = hyper_params['private_hidden_sizes']

    buffer_utt_size = buffer_size
    early_stop_epoch = int(hyper_params['early_stop_epochs'])

    hidden_activation = hyper_params['hidden_activation']
    output_activation = hyper_params['output_activation']

    stream_lr_weights = hyper_params['stream_lr_weights']
    use_private_hidden = hyper_params['use_private_hidden']

    model_type = hyper_params['model_type']

    index_to_project = hyper_params['index_to_project']
    projection_insize = hyper_params['projection_insize']
    projection_outsize = hyper_params['projection_outsize']

    ######### data providers ##########
    (train_x_file_list, train_y_file_list) = train_xy_file_list
    (valid_x_file_list, valid_y_file_list) = valid_xy_file_list

    # NOTE(review): 'logger' is not created locally here -- presumably module-level; verify.
    logger.debug('Creating training data provider')
    train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list,
                            y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs,
                            buffer_size = buffer_size, shuffle = True,
                            index_to_project=index_to_project, projection_insize=projection_insize,
                            indexes_only=expand_by_minibatch)

    logger.debug('Creating validation data provider')
    valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list,
                            y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs,
                            buffer_size = buffer_size, shuffle = False,
                            index_to_project=index_to_project, projection_insize=projection_insize,
                            indexes_only=expand_by_minibatch)

    shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
    train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
    shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
    valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
    train_data_reader.reset()
    valid_data_reader.reset()
    ####################################

    # numpy random generator
    numpy_rng = numpy.random.RandomState(123)

    logger.info('building the model')

    ############## load existing dnn #####
    # NOTE(review): handle from open() not closed explicitly; consider 'with open(...)'.
    dnn_model = pickle.load(open(nnets_file_name, 'rb'))
    train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
                    dnn_model.build_finetune_functions(
                    (train_set_x, train_set_x_proj, train_set_y),
                    (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
    ####################################

    logger.info('fine-tuning the %s model' %(model_type))

    start_time = time.clock()

    best_dnn_model = dnn_model
    best_validation_loss = sys.float_info.max
    previous_loss = sys.float_info.max

    early_stop = 0
    epoch = 0
    previous_finetune_lr = finetune_lr

    logger.info('fine-tuning the %s model' %(model_type))

    #dnn_model.initialise_projection_weights()

    inference_epochs = 20  ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr warmup_epoch_3 = 10 # 10 ## <-------- hard coded !!!!!!!!!! #warmup_epoch_3 = epoch + warmup_epoch_3 #inference_epochs += epoch while (epoch < inference_epochs): epoch = epoch + 1 current_momentum = momentum if epoch > warmup_epoch_3: previous_finetune_lr = current_finetune_lr current_finetune_lr = previous_finetune_lr * 0.5 dev_error = [] sub_start_time = time.clock() ## osw -- inferring word reps on validation set in a forward pass in a single batch ## exausts memory when using 20k projected vocab -- also use minibatches logger.debug('infer word representations for validation set') valid_error = [] n_valid_batches = valid_set_x.get_value().shape[0] / batch_size for minibatch_index in range(n_valid_batches): v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum) valid_error.append(v_loss) this_validation_loss = numpy.mean(valid_error) #valid_error = infer_projections_fn(current_finetune_lr, current_momentum) #this_validation_loss = numpy.mean(valid_error) # if plot: # ## add dummy validation loss so that plot works: # plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss)) # plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss)) # sub_end_time = time.clock() logger.info('INFERENCE epoch %i, validation error %f, time spent %.2f' %(epoch, this_validation_loss, (sub_end_time - sub_start_time))) if cfg.hyper_params['model_type'] == 'TPDNN': if not os.path.isdir(cfg.projection_weights_output_dir): os.mkdir(cfg.projection_weights_output_dir) weights = dnn_model.get_projection_weights() fname = os.path.join(cfg.projection_weights_output_dir, 'proj_INFERENCE_epoch_%s'%(epoch)) numpy.savetxt(fname, weights) best_dnn_model = dnn_model ## always update end_time = time.clock() pickle.dump(best_dnn_model, open(nnets_file_name, 'wb')) logger.info('overall training time: %.2fm validation error %f' % ((end_time - 
start_time) / 60., best_validation_loss)) # if plot: # plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error') # ### ======================================================== if cfg.hyper_params['model_type'] == 'TPDNN': os.system('python %s %s'%('/afs/inf.ed.ac.uk/user/o/owatts/scripts_NEW/plot_weights_multiple_phases.py', cfg.projection_weights_output_dir)) return best_validation_loss def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, cfg=None, use_word_projections=True): logger = logging.getLogger("dnn_generation") logger.debug('Starting dnn_generation') plotlogger = logging.getLogger("plotting") dnn_model = pickle.load(open(nnets_file_name, 'rb')) ## 'remove' word representations by randomising them. As model is unpickled and ## no re-saved, this does not throw trained parameters away. if not use_word_projections: dnn_model.initialise_projection_weights() # visualize_dnn(dbn) file_number = len(valid_file_list) for i in range(file_number): logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) ) fid_lab = open(valid_file_list[i], 'rb') features = numpy.fromfile(fid_lab, dtype=numpy.float32) fid_lab.close() features = features[:(n_ins * (features.size / n_ins))] features = features.reshape((-1, n_ins)) #features, features_proj = expand_projection_inputs(features, cfg.index_to_project, \ # cfg.projection_insize) features, features_proj = get_unexpanded_projection_inputs(features, cfg.index_to_project, \ cfg.projection_insize) #temp_set_x = features.tolist() ## osw - why list conversion necessary? 
#print temp_set_x test_set_x = theano.shared(numpy.asarray(features, dtype=theano.config.floatX)) test_set_x_proj = theano.shared(numpy.asarray(features_proj, dtype='int32')) predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x, test_set_x_proj=test_set_x_proj) # predicted_parameter = test_out() ### write to cmp file predicted_parameter = numpy.array(predicted_parameter, 'float32') temp_parameter = predicted_parameter fid = open(out_file_list[i], 'wb') predicted_parameter.tofile(fid) logger.debug('saved to %s' % out_file_list[i]) fid.close() ##generate bottleneck layer as festures def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list): logger = logging.getLogger("dnn_generation") logger.debug('Starting dnn_generation') plotlogger = logging.getLogger("plotting") dnn_model = pickle.load(open(nnets_file_name, 'rb')) file_number = len(valid_file_list) for i in range(file_number): logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) ) fid_lab = open(valid_file_list[i], 'rb') features = numpy.fromfile(fid_lab, dtype=numpy.float32) fid_lab.close() features = features[:(n_ins * (features.size / n_ins))] features = features.reshape((-1, n_ins)) temp_set_x = features.tolist() test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX)) predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x) ### write to cmp file predicted_parameter = numpy.array(predicted_parameter, 'float32') temp_parameter = predicted_parameter fid = open(out_file_list[i], 'wb') predicted_parameter.tofile(fid) logger.debug('saved to %s' % out_file_list[i]) fid.close() def main_function(cfg): # get a logger for this main function logger = logging.getLogger("main") # get another logger to handle plotting duties plotlogger = logging.getLogger("plotting") # later, we might do this via a handler that is created, attached and configured # using the standard config mechanism of the 
logging module # but for now we need to do it manually plotlogger.set_plot_path(cfg.plot_dir) #### parameter setting######## hidden_layers_sizes = cfg.hyper_params['hidden_layers_sizes'] ####prepare environment try: file_id_list = read_file_list(cfg.file_id_scp) logger.debug('Loaded file id list from %s' % cfg.file_id_scp) except IOError: # this means that open(...) threw an error logger.critical('Could not load file id list from %s' % cfg.file_id_scp) raise ###total file number including training, development, and testing total_file_number = len(file_id_list) data_dir = cfg.data_dir nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim)) nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim)) model_dir = os.path.join(cfg.work_dir, 'nnets_model') gen_dir = os.path.join(cfg.work_dir, 'gen') in_file_list_dict = {} for feature_name in list(cfg.in_dir_dict.keys()): in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False) nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext) nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext) ###normalisation information norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat') ### normalise input full context label # currently supporting two different forms of lingustic features # later, we should generalise this if cfg.label_style == 'HTS': label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name) lab_dim = label_normaliser.dimension logger.info('Input label dimension is %d' % lab_dim) suffix=str(lab_dim) # no longer supported - use new "composed" style labels instead elif cfg.label_style == 'composed': # label_normaliser = 
XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name) suffix='composed' if cfg.process_labels_in_work_dir: label_data_dir = cfg.work_dir else: label_data_dir = data_dir # the number can be removed binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix) nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix) nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix) # nn_label_norm_mvn_dir = os.path.join(data_dir, 'nn_no_silence_lab_norm_'+suffix) in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False) binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext) nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext) nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext) # to do - sanity check the label dimension here? min_max_normaliser = None label_norm_file = 'label_norm_%s.dat' %(cfg.label_style) label_norm_file = os.path.join(label_data_dir, label_norm_file) if cfg.NORMLAB and (cfg.label_style == 'HTS'): # simple HTS labels logger.info('preparing label data (input) using standard HTS style labels') label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list) remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = ['*-#+*']) remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list) min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99) ###use only training data to find min-max information, then apply on the whole dataset min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number]) min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list) if cfg.NORMLAB and (cfg.label_style == 'composed'): # new flexible label preprocessor logger.info('preparing label data (input) using "composed" style 
labels') label_composer = LabelComposer() label_composer.load_label_configuration(cfg.label_config_file) logger.info('Loaded label configuration') # logger.info('%s' % label_composer.configuration.labels ) lab_dim=label_composer.compute_label_dimension() logger.info('label dimension will be %d' % lab_dim) if cfg.precompile_xpaths: label_composer.precompile_xpaths() # there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees) # create all the lists of these, ready to pass to the label composer in_label_align_file_list = {} for label_style, label_style_required in label_composer.label_styles.items(): if label_style_required: logger.info('labels of style %s are required - constructing file paths for them' % label_style) if label_style == 'xpath': in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False) elif label_style == 'hts': in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False) else: logger.critical('unsupported label style %s specified in label configuration' % label_style) raise Exception # now iterate through the files, one at a time, constructing the labels for them num_files=len(file_id_list) logger.info('the label styles required are %s' % label_composer.label_styles) for i in range(num_files): logger.info('making input label features for %4d of %4d' % (i+1,num_files)) # iterate through the required label styles and open each corresponding label file # a dictionary of file descriptors, pointing at the required files required_labels={} for label_style, label_style_required in label_composer.label_styles.items(): # the files will be a parallel set of files for a single utterance # e.g., the XML tree and an HTS label file if label_style_required: required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r') logger.debug(' opening label file %s' % 
in_label_align_file_list[label_style][i]) logger.debug('label styles with open files: %s' % required_labels) label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames) # now close all opened files for fd in required_labels.values(): fd.close() # silence removal if cfg.remove_silence_using_binary_labels: silence_feature = 0 ## use first feature in label -- hardcoded for now logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature])) logger.info('Silence will be removed from CMP files in same way') ## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming: trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \ binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5) else: logger.info('No silence removal done') # start from the labels we have just produced, not trimmed versions nn_label_file_list = binary_label_file_list min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99, exclude_columns=[cfg.index_to_project]) ###use only training data to find min-max information, then apply on the whole dataset min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number]) min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list) if min_max_normaliser != None: ### save label normalisation information for unseen testing labels label_min_vector = min_max_normaliser.min_vector label_max_vector = min_max_normaliser.max_vector label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0) label_norm_info = numpy.array(label_norm_info, 'float32') fid = open(label_norm_file, 'wb') label_norm_info.tofile(fid) fid.close() logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file)) ### make output acoustic data if cfg.MAKECMP: 
logger.info('creating acoustic (output) features') delta_win = [-0.5, 0.0, 0.5] acc_win = [1.0, -2.0, 1.0] acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win) acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict) if cfg.remove_silence_using_binary_labels: ## do this to get lab_dim: label_composer = LabelComposer() label_composer.load_label_configuration(cfg.label_config_file) lab_dim=label_composer.compute_label_dimension() silence_feature = 0 ## use first feature in label -- hardcoded for now logger.info('Silence removal from CMP using binary label file') ## overwrite the untrimmed audio with the trimmed version: trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim, \ binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5) else: ## back off to previous method using HTS labels: remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = ['*-#+*']) remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself ### save acoustic normalisation information for normalising the features back var_dir = os.path.join(data_dir, 'var') if not os.path.exists(var_dir): os.makedirs(var_dir) var_file_dict = {} for feature_name in list(cfg.out_dimension_dict.keys()): var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name])) ### normalise output acoustic data if cfg.NORMCMP: logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation) cmp_norm_info = None if cfg.output_feature_normalisation == 'MVN': normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim) ###calculate mean and std vectors on the training data, and apply on the whole dataset global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim) global_std_vector = 
normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim) normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list) cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0) elif cfg.output_feature_normalisation == 'MINMAX': min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim) global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number]) global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector) min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99) min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number]) min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list) cmp_min_vector = min_max_normaliser.min_vector cmp_max_vector = min_max_normaliser.max_vector cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0) else: logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation)) raise cmp_norm_info = numpy.array(cmp_norm_info, 'float32') fid = open(norm_info_file, 'wb') cmp_norm_info.tofile(fid) fid.close() logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file)) # logger.debug(' value was\n%s' % cmp_norm_info) feature_index = 0 for feature_name in list(cfg.out_dimension_dict.keys()): feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32') fid = open(var_file_dict[feature_name], 'w') feature_std_vector.tofile(fid) fid.close() logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name])) # logger.debug(' value was\n%s' % feature_std_vector) feature_index += cfg.out_dimension_dict[feature_name] train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number] train_y_file_list = 
nn_cmp_norm_file_list[0:cfg.train_file_number] valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number] valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number] test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] # we need to know the label dimension before training the DNN # computing that requires us to look at the labels # # currently, there are two ways to do this if cfg.label_style == 'HTS': label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name) lab_dim = label_normaliser.dimension elif cfg.label_style == 'composed': label_composer = LabelComposer() label_composer.load_label_configuration(cfg.label_config_file) lab_dim=label_composer.compute_label_dimension() logger.info('label dimension is %d' % lab_dim) combined_model_arch = str(len(hidden_layers_sizes)) for hid_size in hidden_layers_sizes: combined_model_arch += '_' + str(hid_size) # nnets_file_name = '%s/%s_%s_%d.%d.%d.%d.%d.train.%d.model' \ # %(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch), # len(hidden_layers_sizes), hidden_layers_sizes[0], # lab_dim, cfg.cmp_dim, cfg.train_file_number) nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.model' \ %(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch), combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number) ### DNN model training if cfg.TRAINDNN: logger.info('training DNN') try: os.makedirs(model_dir) except OSError as e: if e.errno == errno.EEXIST: # not an error - just means directory already exists pass else: logger.critical('Failed to create model directory %s' % model_dir) logger.critical(' OS error 
was: %s' % e.strerror) raise try: if cfg.scheme == 'stagwise': train_basic_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \ valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ nnets_file_name = nnets_file_name, \ n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot) train_DNN_with_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \ valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ nnets_file_name = nnets_file_name, \ n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot) infer_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \ valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ nnets_file_name = nnets_file_name, \ n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot) elif cfg.scheme == 'simultaneous': train_DNN_and_traindev_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \ valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ nnets_file_name = nnets_file_name, \ n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot) else: sys.exit('unknown scheme!') # train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \ # valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ # nnets_file_name = nnets_file_name, \ # n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ # hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot) # infer_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \ # valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ # nnets_file_name 
= nnets_file_name, \ # n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ # hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot) except KeyboardInterrupt: logger.critical('train_DNN interrupted via keyboard') # Could 'raise' the exception further, but that causes a deep traceback to be printed # which we don't care about for a keyboard interrupt. So, just bail out immediately sys.exit(1) except: logger.critical('train_DNN threw an exception') raise ### generate parameters from DNN (with random token reps and inferred ones -- NOTOKENS & TOKENS) temp_dir_name_NOTOKENS = '%s_%s_%d_%d_%d_%d_%d_%d_NOTOKENS' \ %(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \ cfg.train_file_number, lab_dim, cfg.cmp_dim, \ len(hidden_layers_sizes), hidden_layers_sizes[0]) gen_dir_NOTOKENS = os.path.join(gen_dir, temp_dir_name_NOTOKENS) temp_dir_name_TOKENS = '%s_%s_%d_%d_%d_%d_%d_%d_TOKENS' \ %(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \ cfg.train_file_number, lab_dim, cfg.cmp_dim, \ len(hidden_layers_sizes), hidden_layers_sizes[0]) gen_dir_TOKENS = os.path.join(gen_dir, temp_dir_name_TOKENS) gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if cfg.DNNGEN: logger.info('generating from DNN') try: os.makedirs(gen_dir) except OSError as e: if e.errno == errno.EEXIST: # not an error - just means directory already exists pass else: logger.critical('Failed to create generation directory %s' % gen_dir) logger.critical(' OS error was: %s' % e.strerror) raise ## Without words embeddings: gen_file_list_NOTOKENS = prepare_file_path_list(gen_file_id_list, gen_dir_NOTOKENS, cfg.cmp_ext) dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list_NOTOKENS, cfg=cfg, 
use_word_projections=False) ## With word embeddings: gen_file_list_TOKENS = prepare_file_path_list(gen_file_id_list, gen_dir_TOKENS, cfg.cmp_ext) dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list_TOKENS, cfg=cfg, use_word_projections=True) logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation) for gen_file_list in [gen_file_list_NOTOKENS, gen_file_list_TOKENS]: fid = open(norm_info_file, 'rb') cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32) fid.close() cmp_min_max = cmp_min_max.reshape((2, -1)) cmp_min_vector = cmp_min_max[0, ] cmp_max_vector = cmp_min_max[1, ] if cfg.output_feature_normalisation == 'MVN': denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim) denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector) elif cfg.output_feature_normalisation == 'MINMAX': denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector) denormaliser.denormalise_data(gen_file_list, gen_file_list) else: logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation)) raise ##perform MLPG to smooth parameter trajectory ## lf0 is included, the output features much have vuv. 
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features) generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict) ## osw: skip MLPG: # split_cmp(gen_file_list, ['mgc', 'lf0', 'bap'], cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict) ### generate wav if cfg.GENWAV: logger.info('reconstructing waveform(s)') for gen_dir in [gen_dir_NOTOKENS, gen_dir_TOKENS]: generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech # generate_wav(nn_cmp_dir, gen_file_id_list) # reference copy synthesis speech ### evaluation: calculate distortion if cfg.CALMCD: logger.info('calculating MCD') ref_data_dir = os.path.join(data_dir, 'ref_data') ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext) ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext) ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext) in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] calculator = IndividualDistortionComp() spectral_distortion = 0.0 bap_mse = 0.0 f0_mse = 0.0 vuv_error = 0.0 valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number] test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if cfg.remove_silence_using_binary_labels: ## get lab_dim: label_composer = LabelComposer() label_composer.load_label_configuration(cfg.label_config_file) lab_dim=label_composer.compute_label_dimension() ## use first feature in label -- hardcoded for now silence_feature = 0 ## Use these to trim silence: untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if 'mgc' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: 
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*']) remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list) valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim) valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD if 'bap' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*']) remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list) valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim) test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim) valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. 
if use HTS/SPTK style, do the same as MGC if 'lf0' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*']) remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list) valid_f0_mse, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim) test_f0_mse , test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim) logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \ %(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_vuv_error*100.)) logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \ %(test_spectral_distortion , test_bap_mse , test_f0_mse , test_vuv_error*100.)) # this can be removed # if 0: #to calculate distortion of HMM baseline hmm_gen_no_silence_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400_no_silence' hmm_gen_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400' if 1: hmm_mgc_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.mgc_ext) hmm_bap_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.bap_ext) hmm_lf0_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.lf0_ext) hmm_mgc_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.mgc_ext) hmm_bap_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.bap_ext) hmm_lf0_no_silence_list = 
prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.lf0_ext) in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*']) remover.remove_silence(hmm_mgc_list, in_gen_label_align_file_list, hmm_mgc_no_silence_list) remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*']) remover.remove_silence(hmm_bap_list, in_gen_label_align_file_list, hmm_bap_no_silence_list) remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*']) remover.remove_silence(hmm_lf0_list, in_gen_label_align_file_list, hmm_lf0_no_silence_list) calculator = IndividualDistortionComp() spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim) bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim) f0_mse, vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim) spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) bap_mse = bap_mse / 10.0 logger.info('Develop: HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.)) spectral_distortion = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim) bap_mse = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim) f0_mse, vuv_error = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim) spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) bap_mse = bap_mse / 10.0 logger.info('Test : HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.)) 
if __name__ == '__main__': # these things should be done even before trying to parse the command line # create a configuration instance # and get a short name for this instance cfg=configuration.cfg # set up logging to use our custom class logging.setLoggerClass(LoggerPlotter) # get a logger for this main function logger = logging.getLogger("main") if len(sys.argv) != 2: logger.critical('usage: run_dnn.sh [config file name]') sys.exit(1) config_file = sys.argv[1] config_file = os.path.abspath(config_file) cfg.configure(config_file) if cfg.profile: logger.info('profiling is activated') import cProfile, pstats cProfile.run('main_function(cfg)', 'mainstats') # create a stream for the profiler to write to profiling_output = io.StringIO() p = pstats.Stats('mainstats', stream=profiling_output) # print stats to that stream # here we just report the top 10 functions, sorted by total amount of time spent in each p.strip_dirs().sort_stats('tottime').print_stats(10) # print the result to the log logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() ) profiling_output.close() logger.info('---End of profiling result---') else: main_function(cfg) sys.exit(0)
bajibabu/merlin
src/work_in_progress/oliver/run_tpdnn.py
Python
apache-2.0
101,142
[ "NEURON" ]
087e7da80b5ac49449bb00d916da04b79ef7442f56983f99f2bf14047ea04588
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # (mostly translation, see implementation details) # Licence: BSD 3 clause from __future__ import print_function import numpy as np from scipy import linalg, optimize from ..base import BaseEstimator, RegressorMixin from ..metrics.pairwise import manhattan_distances from ..utils import check_random_state, check_array, check_X_y from ..utils.validation import check_is_fitted from . import regression_models as regression from . import correlation_models as correlation from ..utils import deprecated MACHINE_EPSILON = np.finfo(np.double).eps @deprecated("l1_cross_distances is deprecated and will be removed in 0.20.") def l1_cross_distances(X): """ Computes the nonzero componentwise L1 cross-distances between the vectors in X. Parameters ---------- X: array_like An array with shape (n_samples, n_features) Returns ------- D: array with shape (n_samples * (n_samples - 1) / 2, n_features) The array of componentwise L1 cross-distances. ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2) The indices i and j of the vectors in X associated to the cross- distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]). """ X = check_array(X) n_samples, n_features = X.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int) D = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples]) return D, ij @deprecated("GaussianProcess is deprecated and will be removed in 0.20. " "Use the GaussianProcessRegressor instead.") class GaussianProcess(BaseEstimator, RegressorMixin): """The legacy Gaussian Process model class. Note that this class is deprecated and will be removed in 0.20. Use the GaussianProcessRegressor instead. 
Read more in the :ref:`User Guide <gaussian_process>`. Parameters ---------- regr : string or callable, optional A regression function returning an array of outputs of the linear regression functional basis. The number of observations n_samples should be greater than the size p of this basis. Default assumes a simple constant regression trend. Available built-in regression models are:: 'constant', 'linear', 'quadratic' corr : string or callable, optional A stationary autocorrelation function returning the autocorrelation between two points x and x'. Default assumes a squared-exponential autocorrelation model. Built-in correlation models are:: 'absolute_exponential', 'squared_exponential', 'generalized_exponential', 'cubic', 'linear' beta0 : double array_like, optional The regression weight vector to perform Ordinary Kriging (OK). Default assumes Universal Kriging (UK) so that the vector beta of regression weights is estimated using the maximum likelihood principle. storage_mode : string, optional A string specifying whether the Cholesky decomposition of the correlation matrix should be stored in the class (storage_mode = 'full') or not (storage_mode = 'light'). Default assumes storage_mode = 'full', so that the Cholesky decomposition of the correlation matrix is stored. This might be a useful parameter when one is not interested in the MSE and only plan to estimate the BLUP, for which the correlation matrix is not required. verbose : boolean, optional A boolean specifying the verbose level. Default is verbose = False. theta0 : double array_like, optional An array with shape (n_features, ) or (1, ). The parameters in the autocorrelation model. If thetaL and thetaU are also specified, theta0 is considered as the starting point for the maximum likelihood estimation of the best set of parameters. Default assumes isotropic autocorrelation model with theta0 = 1e-1. thetaL : double array_like, optional An array with shape matching theta0's. 
Lower bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. thetaU : double array_like, optional An array with shape matching theta0's. Upper bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. normalize : boolean, optional Input X and observations y are centered and reduced wrt means and standard deviations estimated from the n_samples observations provided. Default is normalize = True so that data is normalized to ease maximum likelihood estimation. nugget : double or ndarray, optional Introduce a nugget effect to allow smooth predictions from noisy data. If nugget is an ndarray, it must be the same length as the number of data points used for the fit. The nugget is added to the diagonal of the assumed training covariance; in this way it acts as a Tikhonov regularization in the problem. In the special case of the squared exponential correlation function, the nugget mathematically represents the variance of the input values. Default assumes a nugget close to machine precision for the sake of robustness (nugget = 10. * MACHINE_EPSILON). optimizer : string, optional A string specifying the optimization algorithm to be used. Default uses 'fmin_cobyla' algorithm from scipy.optimize. Available optimizers are:: 'fmin_cobyla', 'Welch' 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_. It consists in iterating over several one-dimensional optimizations instead of running one single multi-dimensional optimization. random_start : int, optional The number of times the Maximum Likelihood Estimation should be performed from a random starting point. The first MLE always uses the specified starting point (theta0), the next starting points are picked at random according to an exponential distribution (log-uniform on [thetaL, thetaU]). 
Default does not use random starting point (random_start = 1). random_state: integer or numpy.RandomState, optional The generator used to shuffle the sequence of coordinates of theta in the Welch optimizer. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- theta_ : array Specified theta OR the best set of autocorrelation parameters (the \ sought maximizer of the reduced likelihood function). reduced_likelihood_function_value_ : array The optimal reduced likelihood function value. Examples -------- >>> import numpy as np >>> from sklearn.gaussian_process import GaussianProcess >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T >>> y = (X * np.sin(X)).ravel() >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.) >>> gp.fit(X, y) # doctest: +ELLIPSIS GaussianProcess(beta0=None... ... Notes ----- The presentation implementation is based on a translation of the DACE Matlab toolbox, see reference [NLNS2002]_. References ---------- .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J. Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002) http://www2.imm.dtu.dk/~hbn/dace/dace.pdf .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell, and M.D. Morris (1992). Screening, predicting, and computer experiments. 
Technometrics, 34(1) 15--25.` http://www.jstor.org/pss/1269548 """ _regression_types = { 'constant': regression.constant, 'linear': regression.linear, 'quadratic': regression.quadratic} _correlation_types = { 'absolute_exponential': correlation.absolute_exponential, 'squared_exponential': correlation.squared_exponential, 'generalized_exponential': correlation.generalized_exponential, 'cubic': correlation.cubic, 'linear': correlation.linear} _optimizer_types = [ 'fmin_cobyla', 'Welch'] def __init__(self, regr='constant', corr='squared_exponential', beta0=None, storage_mode='full', verbose=False, theta0=1e-1, thetaL=None, thetaU=None, optimizer='fmin_cobyla', random_start=1, normalize=True, nugget=10. * MACHINE_EPSILON, random_state=None): self.regr = regr self.corr = corr self.beta0 = beta0 self.storage_mode = storage_mode self.verbose = verbose self.theta0 = theta0 self.thetaL = thetaL self.thetaU = thetaU self.normalize = normalize self.nugget = nugget self.optimizer = optimizer self.random_start = random_start self.random_state = random_state def fit(self, X, y): """ The Gaussian Process model fitting method. Parameters ---------- X : double array_like An array with shape (n_samples, n_features) with the input at which observations were made. y : double array_like An array with shape (n_samples, ) or shape (n_samples, n_targets) with the observations of the output to be predicted. Returns ------- gp : self A fitted Gaussian Process model object awaiting data to perform predictions. 
""" # Run input checks self._check_params() self.random_state = check_random_state(self.random_state) # Force data to 2D numpy.array X, y = check_X_y(X, y, multi_output=True, y_numeric=True) self.y_ndim_ = y.ndim if y.ndim == 1: y = y[:, np.newaxis] # Check shapes of DOE & observations n_samples, n_features = X.shape _, n_targets = y.shape # Run input checks self._check_params(n_samples) # Normalize data or don't if self.normalize: X_mean = np.mean(X, axis=0) X_std = np.std(X, axis=0) y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) X_std[X_std == 0.] = 1. y_std[y_std == 0.] = 1. # center and scale X if necessary X = (X - X_mean) / X_std y = (y - y_mean) / y_std else: X_mean = np.zeros(1) X_std = np.ones(1) y_mean = np.zeros(1) y_std = np.ones(1) # Calculate matrix of distances D between samples D, ij = l1_cross_distances(X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple input features cannot have the same" " target value.") # Regression matrix and parameters F = self.regr(X) n_samples_F = F.shape[0] if F.ndim > 1: p = F.shape[1] else: p = 1 if n_samples_F != n_samples: raise Exception("Number of rows in F and X do not match. 
Most " "likely something is going wrong with the " "regression model.") if p > n_samples_F: raise Exception(("Ordinary least squares problem is undetermined " "n_samples=%d must be greater than the " "regression model size p=%d.") % (n_samples, p)) if self.beta0 is not None: if self.beta0.shape[0] != p: raise Exception("Shapes of beta0 and F do not match.") # Set attributes self.X = X self.y = y self.D = D self.ij = ij self.F = F self.X_mean, self.X_std = X_mean, X_std self.y_mean, self.y_std = y_mean, y_std # Determine Gaussian Process model parameters if self.thetaL is not None and self.thetaU is not None: # Maximum Likelihood Estimation of the parameters if self.verbose: print("Performing Maximum Likelihood Estimation of the " "autocorrelation parameters...") self.theta_, self.reduced_likelihood_function_value_, par = \ self._arg_max_reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad parameter region. " "Try increasing upper bound") else: # Given parameters if self.verbose: print("Given autocorrelation parameters. " "Computing Gaussian Process model parameters...") self.theta_ = self.theta0 self.reduced_likelihood_function_value_, par = \ self.reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad point. Try increasing theta0.") self.beta = par['beta'] self.gamma = par['gamma'] self.sigma2 = par['sigma2'] self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] if self.storage_mode == 'light': # Delete heavy data (it will be computed again if required) # (it is required only when MSE is wanted in self.predict) if self.verbose: print("Light storage mode specified. " "Flushing autocorrelation matrix...") self.D = None self.ij = None self.F = None self.C = None self.Ft = None self.G = None return self def predict(self, X, eval_MSE=False, batch_size=None): """ This function evaluates the Gaussian Process model at x. 
Parameters ---------- X : array_like An array with shape (n_eval, n_features) giving the point(s) at which the prediction(s) should be made. eval_MSE : boolean, optional A boolean specifying whether the Mean Squared Error should be evaluated or not. Default assumes evalMSE = False and evaluates only the BLUP (mean prediction). batch_size : integer, optional An integer giving the maximum number of points that can be evaluated simultaneously (depending on the available memory). Default is None so that all given points are evaluated at the same time. Returns ------- y : array_like, shape (n_samples, ) or (n_samples, n_targets) An array with shape (n_eval, ) if the Gaussian Process was trained on an array of shape (n_samples, ) or an array with shape (n_eval, n_targets) if the Gaussian Process was trained on an array of shape (n_samples, n_targets) with the Best Linear Unbiased Prediction at x. MSE : array_like, optional (if eval_MSE == True) An array with shape (n_eval, ) or (n_eval, n_targets) as with y, with the Mean Squared Error at x. 
""" check_is_fitted(self, "X") # Check input shapes X = check_array(X) n_eval, _ = X.shape n_samples, n_features = self.X.shape n_samples_y, n_targets = self.y.shape # Run input checks self._check_params(n_samples) if X.shape[1] != n_features: raise ValueError(("The number of features in X (X.shape[1] = %d) " "should match the number of features used " "for fit() " "which is %d.") % (X.shape[1], n_features)) if batch_size is None: # No memory management # (evaluates all given points in a single batch run) # Normalize input X = (X - self.X_mean) / self.X_std # Initialize output y = np.zeros(n_eval) if eval_MSE: MSE = np.zeros(n_eval) # Get pairwise componentwise L1-distances to the input training set dx = manhattan_distances(X, Y=self.X, sum_over_features=False) # Get regression function and correlation f = self.regr(X) r = self.corr(self.theta_, dx).reshape(n_eval, n_samples) # Scaled predictor y_ = np.dot(f, self.beta) + np.dot(r, self.gamma) # Predictor y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets) if self.y_ndim_ == 1: y = y.ravel() # Mean Squared Error if eval_MSE: C = self.C if C is None: # Light storage mode (need to recompute C, F, Ft and G) if self.verbose: print("This GaussianProcess used 'light' storage mode " "at instantiation. Need to recompute " "autocorrelation matrix...") reduced_likelihood_function_value, par = \ self.reduced_likelihood_function() self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] rt = linalg.solve_triangular(self.C, r.T, lower=True) if self.beta0 is None: # Universal Kriging u = linalg.solve_triangular(self.G.T, np.dot(self.Ft.T, rt) - f.T, lower=True) else: # Ordinary Kriging u = np.zeros((n_targets, n_eval)) MSE = np.dot(self.sigma2.reshape(n_targets, 1), (1. - (rt ** 2.).sum(axis=0) + (u ** 2.).sum(axis=0))[np.newaxis, :]) MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets) # Mean Squared Error might be slightly negative depending on # machine precision: force to zero! MSE[MSE < 0.] = 0. 
if self.y_ndim_ == 1: MSE = MSE.ravel() return y, MSE else: return y else: # Memory management if type(batch_size) is not int or batch_size <= 0: raise Exception("batch_size must be a positive integer") if eval_MSE: y, MSE = np.zeros(n_eval), np.zeros(n_eval) for k in range(max(1, n_eval / batch_size)): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to], MSE[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y, MSE else: y = np.zeros(n_eval) for k in range(max(1, n_eval / batch_size)): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y def reduced_likelihood_function(self, theta=None): """ This function determines the BLUP parameters and evaluates the reduced likelihood function for the given autocorrelation parameters theta. Maximizing this function wrt the autocorrelation parameters theta is equivalent to maximizing the likelihood of the assumed joint Gaussian distribution of the observations y evaluated onto the design of experiments X. Parameters ---------- theta : array_like, optional An array containing the autocorrelation parameters at which the Gaussian Process model parameters should be determined. Default uses the built-in autocorrelation parameters (ie ``theta = self.theta_``). Returns ------- reduced_likelihood_function_value : double The value of the reduced likelihood function associated to the given autocorrelation parameters theta. par : dict A dictionary containing the requested Gaussian Process model parameters: sigma2 Gaussian Process variance. beta Generalized least-squares regression weights for Universal Kriging or given beta0 for Ordinary Kriging. gamma Gaussian Process weights. C Cholesky decomposition of the correlation matrix [R]. 
Ft Solution of the linear equation system : [R] x Ft = F G QR decomposition of the matrix Ft. """ check_is_fitted(self, "X") if theta is None: # Use built-in autocorrelation parameters theta = self.theta_ # Initialize output reduced_likelihood_function_value = - np.inf par = {} # Retrieve data n_samples = self.X.shape[0] D = self.D ij = self.ij F = self.F if D is None: # Light storage mode (need to recompute D, ij and F) D, ij = l1_cross_distances(self.X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple X are not allowed") F = self.regr(self.X) # Set up R r = self.corr(theta, D) R = np.eye(n_samples) * (1. + self.nugget) R[ij[:, 0], ij[:, 1]] = r R[ij[:, 1], ij[:, 0]] = r # Cholesky decomposition of R try: C = linalg.cholesky(R, lower=True) except linalg.LinAlgError: return reduced_likelihood_function_value, par # Get generalized least squares solution Ft = linalg.solve_triangular(C, F, lower=True) try: Q, G = linalg.qr(Ft, econ=True) except: #/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177: # DeprecationWarning: qr econ argument will be removed after scipy # 0.7. The economy transform will then be available through the # mode='economic' argument. Q, G = linalg.qr(Ft, mode='economic') sv = linalg.svd(G, compute_uv=False) rcondG = sv[-1] / sv[0] if rcondG < 1e-10: # Check F sv = linalg.svd(F, compute_uv=False) condF = sv[0] / sv[-1] if condF > 1e15: raise Exception("F is too ill conditioned. 
Poor combination " "of regression model and observations.") else: # Ft is too ill conditioned, get out (try different theta) return reduced_likelihood_function_value, par Yt = linalg.solve_triangular(C, self.y, lower=True) if self.beta0 is None: # Universal Kriging beta = linalg.solve_triangular(G, np.dot(Q.T, Yt)) else: # Ordinary Kriging beta = np.array(self.beta0) rho = Yt - np.dot(Ft, beta) sigma2 = (rho ** 2.).sum(axis=0) / n_samples # The determinant of R is equal to the squared product of the diagonal # elements of its Cholesky decomposition C detR = (np.diag(C) ** (2. / n_samples)).prod() # Compute/Organize output reduced_likelihood_function_value = - sigma2.sum() * detR par['sigma2'] = sigma2 * self.y_std ** 2. par['beta'] = beta par['gamma'] = linalg.solve_triangular(C.T, rho) par['C'] = C par['Ft'] = Ft par['G'] = G return reduced_likelihood_function_value, par def _arg_max_reduced_likelihood_function(self): """ This function estimates the autocorrelation parameters theta as the maximizer of the reduced likelihood function. (Minimization of the opposite reduced likelihood function is used for convenience) Parameters ---------- self : All parameters are stored in the Gaussian Process model object. Returns ------- optimal_theta : array_like The best set of autocorrelation parameters (the sought maximizer of the reduced likelihood function). optimal_reduced_likelihood_function_value : double The optimal reduced likelihood function value. optimal_par : dict The BLUP parameters associated to thetaOpt. """ # Initialize output best_optimal_theta = [] best_optimal_rlf_value = [] best_optimal_par = [] if self.verbose: print("The chosen optimizer is: " + str(self.optimizer)) if self.random_start > 1: print(str(self.random_start) + " random starts are required.") percent_completed = 0. 
# Force optimizer to fmin_cobyla if the model is meant to be isotropic if self.optimizer == 'Welch' and self.theta0.size == 1: self.optimizer = 'fmin_cobyla' if self.optimizer == 'fmin_cobyla': def minus_reduced_likelihood_function(log10t): return - self.reduced_likelihood_function( theta=10. ** log10t)[0] constraints = [] for i in range(self.theta0.size): constraints.append(lambda log10t, i=i: log10t[i] - np.log10(self.thetaL[0, i])) constraints.append(lambda log10t, i=i: np.log10(self.thetaU[0, i]) - log10t[i]) for k in range(self.random_start): if k == 0: # Use specified starting point as first guess theta0 = self.theta0 else: # Generate a random starting point log10-uniformly # distributed between bounds log10theta0 = (np.log10(self.thetaL) + self.random_state.rand(*self.theta0.shape) * np.log10(self.thetaU / self.thetaL)) theta0 = 10. ** log10theta0 # Run Cobyla try: log10_optimal_theta = \ optimize.fmin_cobyla(minus_reduced_likelihood_function, np.log10(theta0).ravel(), constraints, iprint=0) except ValueError as ve: print("Optimization failed. Try increasing the ``nugget``") raise ve optimal_theta = 10. 
** log10_optimal_theta optimal_rlf_value, optimal_par = \ self.reduced_likelihood_function(theta=optimal_theta) # Compare the new optimizer to the best previous one if k > 0: if optimal_rlf_value > best_optimal_rlf_value: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta else: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta if self.verbose and self.random_start > 1: if (20 * k) / self.random_start > percent_completed: percent_completed = (20 * k) / self.random_start print("%s completed" % (5 * percent_completed)) optimal_rlf_value = best_optimal_rlf_value optimal_par = best_optimal_par optimal_theta = best_optimal_theta elif self.optimizer == 'Welch': # Backup of the given atrributes theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU corr = self.corr verbose = self.verbose # This will iterate over fmin_cobyla optimizer self.optimizer = 'fmin_cobyla' self.verbose = False # Initialize under isotropy assumption if verbose: print("Initialize under isotropy assumption...") self.theta0 = check_array(self.theta0.min()) self.thetaL = check_array(self.thetaL.min()) self.thetaU = check_array(self.thetaU.max()) theta_iso, optimal_rlf_value_iso, par_iso = \ self._arg_max_reduced_likelihood_function() optimal_theta = theta_iso + np.zeros(theta0.shape) # Iterate over all dimensions of theta allowing for anisotropy if verbose: print("Now improving allowing for anisotropy...") for i in self.random_state.permutation(theta0.size): if verbose: print("Proceeding along dimension %d..." 
% (i + 1)) self.theta0 = check_array(theta_iso) self.thetaL = check_array(thetaL[0, i]) self.thetaU = check_array(thetaU[0, i]) def corr_cut(t, d): return corr(check_array(np.hstack([optimal_theta[0][0:i], t[0], optimal_theta[0][(i + 1)::]])), d) self.corr = corr_cut optimal_theta[0, i], optimal_rlf_value, optimal_par = \ self._arg_max_reduced_likelihood_function() # Restore the given atrributes self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU self.corr = corr self.optimizer = 'Welch' self.verbose = verbose else: raise NotImplementedError("This optimizer ('%s') is not " "implemented yet. Please contribute!" % self.optimizer) return optimal_theta, optimal_rlf_value, optimal_par def _check_params(self, n_samples=None): # Check regression model if not callable(self.regr): if self.regr in self._regression_types: self.regr = self._regression_types[self.regr] else: raise ValueError("regr should be one of %s or callable, " "%s was given." % (self._regression_types.keys(), self.regr)) # Check regression weights if given (Ordinary Kriging) if self.beta0 is not None: self.beta0 = np.atleast_2d(self.beta0) if self.beta0.shape[1] != 1: # Force to column vector self.beta0 = self.beta0.T # Check correlation model if not callable(self.corr): if self.corr in self._correlation_types: self.corr = self._correlation_types[self.corr] else: raise ValueError("corr should be one of %s or callable, " "%s was given." % (self._correlation_types.keys(), self.corr)) # Check storage mode if self.storage_mode != 'full' and self.storage_mode != 'light': raise ValueError("Storage mode should either be 'full' or " "'light', %s was given." 
% self.storage_mode) # Check correlation parameters self.theta0 = np.atleast_2d(self.theta0) lth = self.theta0.size if self.thetaL is not None and self.thetaU is not None: self.thetaL = np.atleast_2d(self.thetaL) self.thetaU = np.atleast_2d(self.thetaU) if self.thetaL.size != lth or self.thetaU.size != lth: raise ValueError("theta0, thetaL and thetaU must have the " "same length.") if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL): raise ValueError("The bounds must satisfy O < thetaL <= " "thetaU.") elif self.thetaL is None and self.thetaU is None: if np.any(self.theta0 <= 0): raise ValueError("theta0 must be strictly positive.") elif self.thetaL is None or self.thetaU is None: raise ValueError("thetaL and thetaU should either be both or " "neither specified.") # Force verbose type to bool self.verbose = bool(self.verbose) # Force normalize type to bool self.normalize = bool(self.normalize) # Check nugget value self.nugget = np.asarray(self.nugget) if np.any(self.nugget) < 0.: raise ValueError("nugget must be positive or zero.") if (n_samples is not None and self.nugget.shape not in [(), (n_samples,)]): raise ValueError("nugget must be either a scalar " "or array of length n_samples.") # Check optimizer if self.optimizer not in self._optimizer_types: raise ValueError("optimizer should be one of %s" % self._optimizer_types) # Force random_start type to int self.random_start = int(self.random_start)
jmetzen/scikit-learn
sklearn/gaussian_process/gaussian_process.py
Python
bsd-3-clause
34,896
[ "Gaussian" ]
a9867c3a33cb4b53a41fc44ef1fd7b424176290c7575f490903dcbd61b5fb011
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layers that operate regularization via the addition of noise. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras.engine import Layer from tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion from tensorflow.python.util.tf_export import tf_export @tf_export('keras.layers.GaussianNoise') class GaussianNoise(Layer): """Apply additive zero-centered Gaussian noise. This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian Noise (GS) is a natural choice as corruption process for real valued inputs. As it is a regularization layer, it is only active at training time. Arguments: stddev: float, standard deviation of the noise distribution. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. 
""" def __init__(self, stddev, **kwargs): super(GaussianNoise, self).__init__(**kwargs) self.supports_masking = True self.stddev = stddev def call(self, inputs, training=None): def noised(): return inputs + K.random_normal( shape=K.shape(inputs), mean=0., stddev=self.stddev) return K.in_train_phase(noised, inputs, training=training) def get_config(self): config = {'stddev': self.stddev} base_config = super(GaussianNoise, self).get_config() return dict(list(base_config.items()) + list(config.items())) @shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @tf_export('keras.layers.GaussianDropout') class GaussianDropout(Layer): """Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer, it is only active at training time. Arguments: rate: float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, rate, **kwargs): super(GaussianDropout, self).__init__(**kwargs) self.supports_masking = True self.rate = rate def call(self, inputs, training=None): if 0 < self.rate < 1: def noised(): stddev = np.sqrt(self.rate / (1.0 - self.rate)) return inputs * K.random_normal( shape=K.shape(inputs), mean=1.0, stddev=stddev) return K.in_train_phase(noised, inputs, training=training) return inputs def get_config(self): config = {'rate': self.rate} base_config = super(GaussianDropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) @shape_type_conversion def compute_output_shape(self, input_shape): return input_shape @tf_export('keras.layers.AlphaDropout') class AlphaDropout(Layer): """Applies Alpha Dropout to the input. 
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs to their original values, in order to ensure the self-normalizing property even after this dropout. Alpha Dropout fits well to Scaled Exponential Linear Units by randomly setting activations to the negative saturation value. Arguments: rate: float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. seed: A Python integer to use as random seed. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, rate, noise_shape=None, seed=None, **kwargs): super(AlphaDropout, self).__init__(**kwargs) self.rate = rate self.noise_shape = noise_shape self.seed = seed self.supports_masking = True def _get_noise_shape(self, inputs): return self.noise_shape if self.noise_shape else K.shape(inputs) def call(self, inputs, training=None): if 0. < self.rate < 1.: noise_shape = self._get_noise_shape(inputs) def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed): # pylint: disable=missing-docstring alpha = 1.6732632423543772848170429916717 scale = 1.0507009873554804934193349852946 alpha_p = -alpha * scale kept_idx = K.greater_equal( K.random_uniform(noise_shape, seed=seed), rate) kept_idx = K.cast(kept_idx, K.floatx()) # Get affine transformation params a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5 b = -a * alpha_p * rate # Apply mask x = inputs * kept_idx + alpha_p * (1 - kept_idx) # Do affine transformation return a * x + b return K.in_train_phase(dropped_inputs, inputs, training=training) return inputs def get_config(self): config = {'rate': self.rate} base_config = super(AlphaDropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) @shape_type_conversion def compute_output_shape(self, input_shape): return input_shape
Xeralux/tensorflow
tensorflow/python/keras/_impl/keras/layers/noise.py
Python
apache-2.0
6,255
[ "Gaussian" ]
befdbff6f747ef3b2f3829366d68354a25e1dd7785448c41327b94149bc2c7b8
#!/usr/bin/env python import os, sys from ConfigParser import ConfigParser from optparse import OptionParser default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'config/galaxy.ini') ) parser = OptionParser() parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default=default_config ) ( options, args ) = parser.parse_args() def init(): options.config = os.path.abspath( options.config ) sys.path.append( os.path.join( os.path.dirname( __file__ ), '..', 'lib' ) ) from galaxy import eggs import pkg_resources config = ConfigParser( dict( file_path = 'database/files', database_connection = 'sqlite:///database/universe.sqlite?isolation_level=IMMEDIATE' ) ) config.read( options.config ) from galaxy.model import mapping return mapping.init( config.get( 'app:main', 'file_path' ), config.get( 'app:main', 'database_connection' ), create_tables = False ) if __name__ == '__main__': print 'Loading Galaxy model...' model = init() sa_session = model.context.current set = 0 dataset_count = sa_session.query( model.Dataset ).count() print 'Processing %i datasets...' % dataset_count percent = 0 print 'Completed %i%%' % percent, sys.stdout.flush() for i, dataset in enumerate( sa_session.query( model.Dataset ).enable_eagerloads( False ).yield_per( 1000 ) ): if dataset.total_size is None: dataset.set_total_size() set += 1 if not set % 1000: sa_session.flush() new_percent = int( float(i) / dataset_count * 100 ) if new_percent != percent: percent = new_percent print '\rCompleted %i%%' % percent, sys.stdout.flush() sa_session.flush() print 'Completed 100%%'
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/scripts/set_dataset_sizes.py
Python
gpl-3.0
1,878
[ "Galaxy" ]
2b638787c2ccba9315a741fd2f7610212782f98fa74ef09e301e01683d5bde39
## INFO ######################################################################## ## ## ## COUBLET ## ## ======= ## ## ## ## Cross-platform desktop client to follow posts from COUB ## ## Version: 0.6.95.221 (20141003) ## ## ## ## File: presenters/post.py ## ## ## ## Designed and written by Peter Varo. Copyright (c) 2014 ## ## License agreement is provided in the LICENSE file ## ## For more info visit: https://github.com/petervaro/coub ## ## ## ## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ## ## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ## ## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ## ## http://coub.com ## ## ## ######################################################################## INFO ## from views.post import CoubletPostView #------------------------------------------------------------------------------# class CoubletPostPresenter: #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def __init__(self, root): # Store static values self._root = root self._post = CoubletPostView() #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def show_view(self): self._post.show() #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def hide_view(self): self._post.hide() #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def get_view(self): return self._post #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def load(self, packet): self._post.load(packet) #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def update(self, packet): self._post.update(packet) #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # def reset_unseen(self): self._post.kill_if_not_visible()
petervaro/coublet
presenters/post.py
Python
mit
2,820
[ "VisIt" ]
3f8c36ea03658c45ae841161c59592e579eaedcd58212c3095653c229463e095
from autonomous import Autonomous from mecanum_drive import MecanumDrive from intake import Intake from shooter import Shooter from shooter_service import ShooterService try: import wpilib except ImportError: from pyfrc import wpilib class Aimbot(wpilib.SimpleRobot): def __init__(self): super().__init__() self.drive_stick = wpilib.Joystick(1) self.arm_stick = wpilib.Joystick(2) self.front_right_motor = wpilib.Jaguar(2) self.front_left_motor = wpilib.Jaguar(1) self.back_left_motor = wpilib.Jaguar(3) self.back_right_motor = wpilib.Jaguar(4) self.intake_wheels_motor = wpilib.Jaguar(10) self.intake_arm_motor = wpilib.Jaguar(6) self.shooter_servo = wpilib.Servo(7) self.shooter_motor = wpilib.Jaguar(5) self.encoder = wpilib.Encoder(1, 2, True) self.mecanum_drive = MecanumDrive( self.front_right_motor, self.front_left_motor, self.back_right_motor, self.back_left_motor, self.drive_stick ) self.intake = Intake(self.intake_wheels_motor, self.intake_arm_motor, self.arm_stick ) self.shooter_service = ShooterService(self.shooter_motor, self.shooter_servo, self.arm_stick ) self.shooter = Shooter(self.shooter_motor, self.encoder, self.shooter_servo, self.arm_stick, self.shooter_service ) self.autonomous = Autonomous( self.shooter_service, self.intake_arm_motor, self.front_left_motor, self.front_right_motor, self.back_left_motor, self.back_right_motor ) def Autonomous(self): self.GetWatchdog().SetEnabled(False) self.autonomous.reset() while self.IsAutonomous() and self.IsEnabled(): self.autonomous.iterate() wpilib.Wait(0.01) def OperatorControl(self): dog = self.GetWatchdog() dog.SetEnabled(True) dog.SetExpiration(0.25) while self.IsOperatorControl() and self.IsEnabled(): dog.Feed() self.mecanum_drive.iterate() self.intake.iterate() self.shooter.iterate() self.shooter_service.iterate() wpilib.Wait(0.04) def run(): robot = Aimbot() robot.StartCompetition() return robot if __name__ == '__main__': wpilib.run()
eshsrobotics/aimbot
robot.py
Python
mit
2,293
[ "Jaguar" ]
cf98b863968959a7b9cc1791428b34f85e158ddb5c60161733ee97f9c4c0e7a2
from __future__ import absolute_import from __future__ import print_function import sys, os, yaml, glob import subprocess import string import sys from nougat import common def run(global_config, sample_config): sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) #Check if the user has specified tools, if not select default list of tools if "tools" not in sample_config or len(sample_config["tools"]) == 0: sample_config["tools"] = ["soapdenovo"] #Execute the commands now for command in sample_config["tools"]: command_fn = getattr( sys.modules[__name__] , "_run_{}".format(command)) sample_config = command_fn(global_config, sample_config, sorted_libraries_by_insert) def _run_abyss(global_config, sample_config, sorted_libraries_by_insert): ########## ACQUIRE ALL THE INFO AND CREATE THE ASSEMBLY FOLDER assembler = "abyss" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) # in abyss case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) if _prepare_folder_structure("abyss", assemblyDirectory) == 0: os.chdir(assemblyDirectory) else: return sample_config ########### HERE IT START THE SPECIFIC ASSEMBLER PART assembler_stdOut = open("abyss.stdOut", "a") assembler_stdErr = open("abyss.stdErr", "a") program=os.path.join(programBIN, "abyss-pe") command = "" command += "{} ".format(program) threads = 8 # default for UPPMAX if "threads" in sample_config : threads = sample_config["threads"] command += "np={} ".format(threads) kmer = 54 if "kmer" in sample_config: kmer = sample_config["kmer"] command += "k={} ".format(kmer) libraries = {} for library, libraryInfo in sorted_libraries_by_insert: read1 = libraryInfo["pair1"] read2 = libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = 
libraryInfo["insert"] std = libraryInfo["std"] if orientation=="innie" or orientation=="none": if read2 is None: # check if this is the first time I insert a se file if "se" not in libraries: libraries["se"] = "se=\'" libraries["se"] = libraries["se"] + read1 else: if not "lib" in libraries: libraries["lib"] = {} libName = insert # lib name is the insert size if not libName in libraries["lib"]: libraries["lib"][libName] = "" libraries["lib"][libName] += "{} {} ".format(read1, read2) else: if not "mp" in libraries: libraries["mp"] = {} libName = format(insert) if not libName in libraries["mp"]: libraries["mp"][libName] = "" libraries["mp"][libName] += "{} {} ".format(read1, read2) #now create the command command += "name={} ".format(outputName) librariesSE = "" librariesPE = "" librariesMP = "" if "se" in libraries: libraries["se"] = libraries["se"] + "\'" librariesSE = libraries["se"] if "lib" in libraries: lib="lib=\'" for libPE, libPEreads in sorted(libraries["lib"].items()): lib = lib + "lib{} ".format(libPE) librariesPE += " lib{}=\'{}\' ".format(libPE,libPEreads) lib=lib + "\' " command += "{} ".format(lib) if "mp" in libraries: mp="mp=\'" for libMP, libMPreads in sorted(libraries["mp"].items()): mp = mp + "lib{} ".format(libMP) librariesMP += " lib{}=\'{}\' ".format(libMP,libMPreads) mp=mp + "\' " command += "{} ".format(mp) command += "{} ".format(librariesSE) command += "{} ".format(librariesPE) command += "{} ".format(librariesMP) common.print_command(command) if common.check_dryrun(sample_config): os.chdir("..") return sample_config os.makedirs(os.path.join(assemblyDirectory, "runABySS")) os.chdir("runABySS") returnValue = 0 returnValue = subprocess.call(command, stdout=assembler_stdOut, stderr=assembler_stdErr, shell=True) os.chdir("..") flags = sample_config.get("flags", []) if returnValue == 0 and not common.check_dryrun(sample_config): if os.path.exists(os.path.join("runABySS","{}-contigs.fa".format( outputName))): subprocess.call(["cp", 
os.path.join("runABySS", "{}-contigs.fa".format(outputName)), "{}.ctg.fasta".format(outputName) ]) subprocess.call(["cp", os.path.join("runABySS", "{}-scaffolds.fa".format(outputName)), "{}.scf.fasta".format(outputName) ]) if not "keep_tmp_files" in flags: subprocess.call(["rm", "-r", "runABySS"]) elif not common.check_dryrun(sample_config): print("something wrong with ABySS -> no contig file generated") return sample_config else: print("ABySS terminated with an error. Please check running folder", "for more informations") os.chdir("..") return sample_config def _run_allpaths(global_config, sample_config, sorted_libraries_by_insert): ########## ACQUIRE ALL THE INFO AND CREATE THE ASSEMBLY FOLDER assembler = "allpaths" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) # in abyss case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) if _prepare_folder_structure("allpaths", assemblyDirectory) == 0: os.chdir(assemblyDirectory) else: return sample_config inGroups_file = open("in_groups.csv", "w") inLibs_file = open("in_libs.csv", "w") inGroups_file.write("group_name, library_name, file_name\n") inLibs_file.write("library_name, project_name, organism_name, type, " "paired, frag_size, frag_stddev, insert_size, insert_stddev, " "read_orientation,genomic_start, genomic_end\n") librariesForInLibs = [] librariesForInLibsDict = {} group_name = 1; for library, libraryInfo in sorted_libraries_by_insert: read1 =libraryInfo["pair1"] read2 =libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] if orientation=="innie": path, fqfile=os.path.split(read1) if "_1.fastq" in fqfile: fqfile = fqfile.replace("_1.fastq", "_?.fastq") elif "_R1_" in fqfile: fqfile = 
fqfile.replace("_R1_", "_R?_") else: print("error file format not supported {}".format(fqfile)) return sample_config inGroups_file.write("PE{}, lib{}, {}\n".format(group_name, insert, os.path.join(path, fqfile))) group_name += 1 if insert not in librariesForInLibsDict: librariesForInLibsDict[insert] = insert librariesForInLibs.append("lib{}, genome, genome, fragment, 1, " "{}, {}, , , inward, 0, 0\n".format(insert,insert, std)) elif orientation=="outtie": path, fqfile = os.path.split(read1) if "_1.fastq" in fqfile: fqfile = fqfile.replace("_1.fastq", "_?.fastq") elif "_R1_" in fqfile: fqfile = fqfile.replace("_R1_", "_R?_") else: print("error file format not supported {}".format(file)) return sample_config inGroups_file.write("MP{}, lib{}, {}\n".format(group_name, insert, os.path.join(path, fqfile))) group_name += 1 if insert not in librariesForInLibsDict: librariesForInLibsDict[insert] = insert librariesForInLibs.append("lib{}, genome, genome, fragment, 1, " ", , {}, {}, outward, 0, 0\n".format(insert,insert, std)) else: print("all paths support only innies and outties") inGroups_file.close() for lib in librariesForInLibs: inLibs_file.write(lib) inLibs_file.close() #NOW RUN ALLPATHS FOR REAL program=os.path.join(programBIN, "PrepareAllPathsInputs.pl") os.mkdir("data_dir") data_dir = os.path.join(assemblyDirectory, "data_dir") ploidy = "PLOIDY=1" if len(program_options) > 0: if len(program_options) >1: print("Running ALlpaths only one parameter accepted as option", "here: PLOIDY=2") return sample_config if program_options[0] == "PLOIDY=2": ploidy = "PLOIDY=2" else: print("Running ALlpaths only one parameter accepted as option", "here: PLOIDY=2") return sample_config command = [program , "DATA_DIR={}".format(data_dir), ploidy, "PICARD_TOOLS_DIR={}".format( global_config["Tools"]["picard"]["bin"]), "FORCE_PHRED=True", "PHRED_64=False", "IN_GROUPS_CSV={}".format(os.path.join(assemblyDirectory,"in_groups.csv")), 
"IN_LIBS_CSV={}".format(os.path.join(assemblyDirectory,"in_libs.csv"))] if common.check_dryrun(sample_config): common.print_command(command) program = os.path.join(programBIN, "RunAllPathsLG") command = [program, "PRE={}".format(assemblyDirectory), "REFERENCE_NAME=.", "DATA_SUBDIR=data_dir", "RUN=allpaths", "SUBDIR=run"] common.print_command(command) os.chdir("..") return sample_config assembler_stdOut = open("allpaths_PrepareAllPathsInputs.stdOut", "w") assembler_stdErr = open("allpaths_PrepareAllPathsInputs.stdErr", "w") common.print_command(command) returnValue = subprocess.call(command, stdout=assembler_stdOut, stderr=assembler_stdErr) assembler_stdOut.close() assembler_stdErr.close() flags = sample_config.get("flags", []) if returnValue == 0: program = os.path.join(programBIN, "RunAllPathsLG") command = [program, "PRE={}".format(assemblyDirectory), "REFERENCE_NAME=.", "DATA_SUBDIR=data_dir", "RUN=allpaths", "SUBDIR=run", "HAPLOIDIFY=True"] common.print_command(command) assembler_stdOut = open("allpaths_RunAllPathsLG.stdOut", "w") assembler_stdErr = open("allpaths_RunAllPathsLG.stdErr", "w") returnValue = subprocess.call(command, stdout=assembler_stdOut, stderr=assembler_stdErr) if returnValue != 0: print("ALLPATHS RunAllPathsLG terminated with an error. 
Please", "check running folder for more informations") os.chdir("..") return sample_config else: # save results assembly_dir = os.path.join("data_dir", "allpaths", "ASSEMBLIES", "run") if os.path.exists(os.path.join(assembly_dir, "final.assembly.fasta")): exit_code = subprocess.call(["cp", os.path.join(assembly_dir, "final.contigs.fasta"), "{}.ctg.fasta".format(outputName)]) exit_code += subprocess.call(["cp", os.path.join(assembly_dir, "final.assembly.fasta"), "{}.scf.fasta".format(outputName)]) if not "keep_tmp_files" in flags and exit_code == 0: subprocess.call(["rm", "-r", "data_dir"]) else: print("something wrong with Allpaths > no contig file generated") os.chdir("..") return sample_config else: print("ALLPATHS PrepareAllPathInputs terminated with an error. " "Please check running folder for more informations") os.chdir("..") return sample_config os.chdir("..") return sample_config def _run_cabog(global_config, sample_config, sorted_libraries_by_insert): ########## ACQUIRE ALL THE INFO AND CREATE THE ASSEMBLY FOLDER assembler = "cabog" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) # in cabog case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) if _prepare_folder_structure(assembler, assemblyDirectory) == 0: os.chdir(assemblyDirectory) else: return sample_config ########### HERE IT START THE SPECIFIC ASSEMBLER PART sys.path.insert(0, programBIN) libraries = 1 for library, libraryInfo in sorted_libraries_by_insert: command_fastqToCA = os.path.join(programBIN, "fastqToCA") read1=libraryInfo["pair1"] read2=libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] command_fastqToCA += " -libraryname " command_fastqToCA += " {}_{}".format(outputName, 
libraries) command_fastqToCA += " -insertsize " command_fastqToCA += " {} {} ".format(insert,std) command_fastqToCA += " -technology " command_fastqToCA += " illumina " command_fastqToCA += " -type " command_fastqToCA += " illumina " if orientation=="innie" or orientation=="none" : command_fastqToCA += " -innie " if read2 is None: command_fastqToCA += " -reads " command_fastqToCA += " {} ".format(read1) else: command_fastqToCA += " -mates " command_fastqToCA += " {},{} ".format(read1, read2) elif orientation=="outtie": command_fastqToCA += " -outtie " command_fastqToCA += " -mates " command_fastqToCA += " {},{} ".format(read1, read2) command_fastqToCA += " > " command_fastqToCA += " {}_{}.frg ".format(outputName, libraries) common.print_command(command_fastqToCA) if not common.check_dryrun(sample_config): cabog_stdOut = open("cabog_fastqToCA.stdOut", "w") cabog_stdErr = open("cabogfastqToCA.stdErr", "w") subprocess.call(command_fastqToCA, stderr=cabog_stdErr, shell=True) cabog_stdOut.close() cabog_stdErr.close() libraries += 1 command_runCA = os.path.join(programBIN, "runCA") command_runCA += " -d runCABOGfolder -p {} *frg".format(outputName) common.print_command(command_runCA) if common.check_dryrun(sample_config): return sample_config returnValue = 0 cabog_stdOut = open("cabog_runCA.stdOut", "w") cabog_stdErr = open("cabog_runCA.stdErr", "w") returnValue = subprocess.call(command_runCA, stdout=cabog_stdOut, stderr=cabog_stdErr, shell=True) flags = sample_config.get("flags", []) if returnValue == 0: #assembly succed, remove files and save assembly if os.path.exists(os.path.join("runCABOGfolder","9-terminator", "{}.ctg.fasta".format(outputName))): subprocess.call(["cp", os.path.join("runCABOGfolder","9-terminator", "{}.ctg.fasta".format(outputName)), "{}.ctg.fasta".format(outputName)]) subprocess.call(["cp", os.path.join("runCABOGfolder","9-terminator", "{}.scf.fasta".format(outputName)), "{}.scf.fasta".format(outputName)]) if not "keep_tmp_files" in flags: 
subprocess.call(["rm", "-r", "runCABOGfolder"]) else: print("something wrong with CABOG -> no contig file generated") else: print("CABOG terminated with an error. Please check running folder", "for more informations") os.chdir("..") return sample_config def _run_masurca(global_config, sample_config,sorted_libraries_by_insert): ########## ACQUIRE ALL THE INFO AND CREATE THE ASSEMBLY FOLDER assembler = "masurca" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) # in cabog case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) if _prepare_folder_structure(assembler, assemblyDirectory) == 0: os.chdir(assemblyDirectory) else: return sample_config ########### HERE IT START THE SPECIFIC ASSEMBLER PART masurca_config_file = open("configuration.txt", "w") masurca_config_file.write("DATA\n") allTheLetters = string.lowercase libraryPE = "p" libraryPEnum = 0 libraryMP = "m" libraryMPnum = 0 #TODO: single ended reads for library, libraryInfo in sorted_libraries_by_insert: read1=libraryInfo["pair1"] read2=libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] if orientation=="innie": if read2 is not None: configurationLine = "PE = {}{} {} {} {} {}".format(libraryPE, allTheLetters[libraryPEnum], insert, std, read1, read2) masurca_config_file.write("{}\n".format(configurationLine)) libraryPEnum += 1 #TODO: check when more than 21 PE libraries ae specified elif orientation=="outtie": configurationLine = "JUMP = {}{} {} {} {} {}".format(libraryMP, allTheLetters[libraryMPnum], insert, std, read1, read2) masurca_config_file.write("{}\n".format(configurationLine)) libraryMPnum += 1 #TODO: check when more than 21 PE libraries ae specified masurca_config_file.write("END\n") 
masurca_config_file.write("\n") masurca_config_file.write("PARAMETERS\n") #this is k-mer size for deBruijn graph values between 25 and 101 are #supported, auto will compute the optimal size based on the read data #and GC content masurca_config_file.write("GRAPH_KMER_SIZE=auto\n") #set this to 1 for Illumina-only assemblies and to 0 if you have 2x or #more long (Sanger, 454) reads masurca_config_file.write("USE_LINKING_MATES=1\n") #this parameter is useful if you have too many jumping library mates. #See manual for explanation about settings based on genome length if sample_config["genomeSize"] > 10000000: masurca_config_file.write("LIMIT_JUMP_COVERAGE = 1000\n") else: masurca_config_file.write("LIMIT_JUMP_COVERAGE = 60\n") #these are the additional parameters to Celera Assembler. do not worry #about performance, number or processors or batch sizes -- these are #computed automatically. for mammals do not set cgwErrorRate above 0.15!!! if sample_config["genomeSize"] > 1500000000: masurca_config_file.write("CA_PARAMETERS = ovlMerSize=30 \ cgwErrorRate=0.15 ovlMemory=4GB\n") else: masurca_config_file.write("CA_PARAMETERS = ovlMerSize=30 \ cgwErrorRate=0.25 ovlMemory=4GB\n") #auto-detected number of cpus to use threads = 8 # default for UPPMAX if "threads" in sample_config : threads = sample_config["threads"] masurca_config_file.write("NUM_THREADS= {}\n".format(threads)) #this is mandatory jellyfish hash size ---- jellyfish hash size, #set this to about 10x the genome size. JF_SIZE = sample_config["genomeSize"] * 11 masurca_config_file.write("JF_SIZE={}\n".format(JF_SIZE)) #this specifies if we do (1) or do not (0) want to trim long runs of #homopolymers (e.g. 
GGGGGGGG) from 3' read ends, use it for high GC genomes masurca_config_file.write("DO_HOMOPOLYMER_TRIM=0\n") masurca_config_file.write("END\n") masurca_config_file.write("\n") masurca_config_file.close() if common.check_dryrun(sample_config): os.chdir("..") return sample_config masurca_stdOut = open("masurca.stdOut", "w") masurca_stdErr = open("masurca.stdErr", "w") os.mkdir("runMASURCA") os.chdir("runMASURCA") command = [os.path.join(programBIN,"bin/masurca") , "../configuration.txt"] common.print_command(command) subprocess.call(command, stdout=masurca_stdOut, stderr=masurca_stdErr) if not os.path.exists("assemble.sh"): print("MaSuRCA: assemble.sh not created. Unknown failure") return sample_config command = ["./assemble.sh"] common.print_command(command) returnValue = subprocess.call(command, stdout=masurca_stdOut, stderr=masurca_stdErr) os.chdir("..") flags = sample_config.get("flags", []) if returnValue == 0: if os.path.exists(os.path.join( "runMASURCA","CA/10-gapclose/genome.scf.fasta")): subprocess.call(["cp", os.path.join( "runMASURCA","CA/10-gapclose/genome.ctg.fasta"), "{}.ctg.fasta".format(outputName) ]) subprocess.call(["cp", os.path.join( "runMASURCA","CA/10-gapclose/genome.scf.fasta"), "{}.scf.fasta".format(outputName) ]) if not "keep_tmp_files" in flags: subprocess.call(["rm", "-r", "runMASURCA"]) else: print("something wrong with MaSuRCA -> no contig file generated") else: print("MaSuRCA terminated with an error. 
Please check running folder", "for more informations") return sample_config os.chdir("..") return sample_config def _run_soapdenovo(global_config, sample_config, sorted_libraries_by_insert): ########## ACQUIRE ALL THE INFO AND CREATE THE ASSEMBLY FOLDER assembler = "soapdenovo" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) # in cabog case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) if _prepare_folder_structure(assembler, assemblyDirectory) == 0: os.chdir(assemblyDirectory) else: return sample_config ########### HERE IT START THE SPECIFIC ASSEMBLER PART kmer = 54 if "kmer" in sample_config: kmer = sample_config["kmer"] threads = ["-p", "8"] # default for UPPMAX if "threads" in sample_config: threads = ["-p", "{}".format(sample_config["threads"])] soap_config_file = open("configuration.txt", "w") soap_config_file.write("max_rd_len=150\n") #TODO make this a parameter in the options rank = 1 for library, libraryInfo in sorted_libraries_by_insert: soap_config_file.write("[LIB]\n") read1 = libraryInfo["pair1"] read2 = libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] soap_config_file.write("avg_ins={}\n".format(insert)) soap_config_file.write("rank={}\n".format(rank)) rank += 1 soap_config_file.write("map_len=30\n") if orientation=="innie" or orientation=="none": soap_config_file.write("asm_flags=3\n") soap_config_file.write("pair_num_cutoff=3\n") soap_config_file.write("reverse_seq=0\n") if read2 is None: soap_config_file.write("q={}\n".format(read1)) else: soap_config_file.write("q1={}\n".format(read1)) soap_config_file.write("q2={}\n".format(read2)) elif orientation=="outtie": soap_config_file.write("asm_flags=2\n") 
soap_config_file.write("pair_num_cutoff=5\n") soap_config_file.write("reverse_seq=1\n") soap_config_file.write("q1={}\n".format(read1)) soap_config_file.write("q2={}\n".format(read2)) soap_config_file.close() assembler_stdOut = open("soap.stdOut", "w") assembler_stdErr = open("soap.stdErr", "w") os.makedirs(os.path.join(assemblyDirectory, "runSOAP")) os.chdir("runSOAP") #TODO : lots of missing options command = [programBIN , "all", "-s", "{}".format(os.path.join(assemblyDirectory, "configuration.txt")), "-K", "{}".format(kmer), "-L", "500", "-o", "soapAssembly", threads[0], threads[1] ] common.print_command(command) returnValue = 0 if not common.check_dryrun(sample_config): subprocess.call(command, stdout=assembler_stdOut, stderr=assembler_stdErr) else: os.chdir("..") os.chdir("..") return sample_config os.chdir("..") flags = sample_config.get("flags", []) if returnValue == 0: if(os.path.exists(os.path.join("runSOAP","soapAssembly.scafSeq"))): subprocess.call(["cp", os.path.join("runSOAP", "soapAssembly.scafSeq"), "{}.scf.fasta".format(outputName)]) subprocess.call(["cp", os.path.join("runSOAP", "soapAssembly.contig"), "{}.ctg.fasta".format(outputName)]) if not "keep_tmp_files" in flags: subprocess.call(["rm", "-r", "runSOAP"]) else: print("something wrong with SOAPdenovo -> no contig file generated") else: print("SOAPdenovo terminated with an error. 
Please check running", "folder for more informations") os.chdir("..") return sample_config os.chdir("..") return sample_config def _run_spades(global_config, sample_config, sorted_libraries_by_insert): ########## ACQUIRE ALL THE INFO AND CREATE THE ASSEMBLY FOLDER assembler = "spades" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) # in cabog case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] sorted_libraries_by_insert = common._sort_libraries_by_insert(sample_config) if _prepare_folder_structure(assembler, assemblyDirectory) == 0: os.chdir(assemblyDirectory) else: return sample_config ########### HERE IT START THE SPECIFIC ASSEMBLER PART command = "" command += "{} ".format(programBIN) for option in program_options: command += "{} ".format(option) #creates the command on-the-fly peLibrary = 1 mpLibrary = 1 for library, libraryInfo in sorted_libraries_by_insert: read1 = libraryInfo["pair1"] read2 = libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] if orientation=="innie" or orientation=="none": if read2 is None: command += "--pe{}-s {} ".format(peLibrary, read1) else: command += "--pe{}-1 {} --pe{}-2 {} ".format(peLibrary, read1, peLibrary, read2) peLibrary += 1 elif orientation=="outtie": command += "--mp{}-1 {} --mp{}-2 {} ".format(mpLibrary, read1, mpLibrary, read2) mpLibrary += 1 else: print("orientation{} not supported.... 
why the program did not", "failed earlier?".format(orientation)) command += "-o {} ".format(outputName) common.print_command(command) returnValue = 0 if not common.check_dryrun(sample_config): assembler_stdOut = open("spades.stdOut", "a") assembler_stdErr = open("spades.stdErr", "a") returnValue = subprocess.call(command, stdout=assembler_stdOut, stderr=assembler_stdErr, shell=True) else: return sample_config flags = sample_config.get("flags", []) if returnValue == 0: if os.path.exists(os.path.join(outputName,"contigs.fasta")): subprocess.call(["cp", os.path.join(outputName,"contigs.fasta"), "{}.ctg.fasta".format(outputName)]) subprocess.call(["cp", os.path.join(outputName,"scaffolds.fasta"), "{}.scf.fasta".format(outputName)]) if not "keep_tmp_files" in flags: subprocess.call(["rm", "-r", outputName]) else: print("something wrong with SPADES -> no contig file generated") else: print("SPADES terminated with an error. Please check running folder", "for more informations") os.chdir("..") return sample_config def _run_trinity(global_config, sample_config, sorted_libraries_by_insert): print("running trinity ...") assembler = "trinity" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) if common.directory_exists(assemblyDirectory): return sample_config os.chdir(assemblyDirectory) # now I am in the assembly directory sorted_libraries_by_insert = common.prepare_folder_structure( sorted_libraries_by_insert) # in masurca case there is no exectuable as a make file must be created programBIN = global_config["Tools"][assembler]["bin"] + "Trinity" program_options = global_config["Tools"][assembler]["options"] if assembler in sample_config: program_options=sample_config[assembler] ########### HERE IT START THE SPECIFIC ASSEMBLER PART command = [programBIN] command.extend(["--seqType", "fq"]) command.extend(["--JM", "100G"]) if "threads" in sample_config: command.extend(["--CPU", 
str(sample_config["threads"])]) for library, libraryInfo in sorted_libraries_by_insert: read1 = libraryInfo["pair1"] read2 = libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] if read2 is None: command.append("--single") command.append("{}".format(read1)) elif orientation=="innie": command.append("--left") command.append("{}".format(read1)) command.append("--right") command.append("{}".format(read2)) else: print("trinity: somthing wrong or unexpected in the sample", "config file") return sample_config command.extend(["--output", "trinity"]) assembler_stdOut = open("trinity.stdOut", "w") assembler_stdErr = open("trinity.stdErr", "w") print(" ".join(command)) returnValue = subprocess.call(" ".join(command), stdout=assembler_stdOut, stderr=assembler_stdErr, shell=True) # now align reads back to transcripts and estimate abundance os.chdir("trinity") programBIN = global_config["Tools"][assembler]["bin"] + \ "util/align_and_estimate_abundance.pl" command = [programBIN] command.extend(["--transcripts", "Trinity.fasta"]) command.extend(["--seqType", "fq"]) for library, libraryInfo in sorted_libraries_by_insert: read1 = libraryInfo["pair1"] read2 = libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] if read2 is not None and orientation == "innie": command.append("--left") command.append("{}".format(os.path.splitext(read1)[0])) command.append("--right") command.append("{}".format(os.path.splitext(read2)[0])) command.extend(["--aln_method", "bowtie"]) command.extend(["--est_method", "RSEM"]) command.append("--debug") command.append("--trinity_mode") command.append("--prep_reference") if "threads" in sample_config: command.extend(["--thread_count", str(sample_config["threads"])]) print(" ".join(command)) returnValue = subprocess.call(" ".join(command), stdout=assembler_stdOut, stderr=assembler_stdErr, shell=True) #now copy results os.chdir("..") 
subprocess.call(["cp", "trinity/Trinity.fasta", "{}.fasta".format(outputName)]) subprocess.call(["cp", "trinity/RSEM.isoforms.results", "{}.isoforms.results".format(outputName)]) subprocess.call(["cp", "trinity/RSEM.genes.results", "{}.genes.results".format(outputName)]) os.chdir(currentDirectory) return sample_config def _prepare_folder_structure(assembler,assemblyDirectory): if common.directory_exists(assemblyDirectory): print("Assembler {} asumer already computed as folder {} exists".format( assembler,assemblyDirectory)) return 1 return 0 def _run_abyss_mergePairs(global_config, sample_config, sorted_libraries_by_insert): print("running abyss-mergepairs ...") assembler = "abyss_mergePairs" outputName = sample_config["output"] currentDirectory = os.getcwd() assemblyDirectory = os.path.join(currentDirectory, assembler) if common.directory_exists(assemblyDirectory): return sample_config os.chdir(assemblyDirectory) # now I am in the assembly directory sorted_libraries_by_insert = common.prepare_folder_structure( sorted_libraries_by_insert) # in abyss case there is no exectuable programBIN = global_config["Tools"][assembler]["bin"] program_options = global_config["Tools"][assembler]["options"] if assembler in sample_config: program_options=sample_config[assembler] ########### HERE IT START THE SPECIFIC ASSEMBLER PART program=programBIN command = [] command.append(program) for option in program_options: command.append(option) libraries = {} for library, libraryInfo in sorted_libraries_by_insert: read1 = libraryInfo["pair1"] read2 = libraryInfo["pair2"] orientation = libraryInfo["orientation"] insert = libraryInfo["insert"] std = libraryInfo["std"] outputNameArray = read1.split('/')[-1].split('_') outputName = "{}_{}".format(outputNameArray[0], outputNameArray[1]) if orientation=="innie": if read2 is not None: currentCommand = command; currentCommand.append('-o') currentCommand.append(outputName) currentCommand.append(read1) currentCommand.append(read2) abyss_stdOut = 
open("mergePairs_{}.stdOut".format(outputName), "a") abyss_stdErr = open("mergePairs_{}.stdErr".format(outputName), "a") print(command) subprocess.call(command, stdout=abyss_stdOut, stderr=abyss_stdErr) command_mv = ["mv", "mergePairs_{}.stdErr".format(outputName), "{}.txt".format(outputName)] subprocess.call(command_mv) os.chdir("..") return sample_config
senthil10/NouGAT
nougat/assemble.py
Python
mit
36,060
[ "Bowtie" ]
af473e5b8ba0b91d19831ee485692a1897fb3690af6bcc9df48e8980ab0cef09
# -*- coding: utf-8 -*- # #Created on Fri Apr 14 15:21:17 2017 # #author: Elina Thibeau-Sutre # from .initializations import initialize_log_assignements,initialize_mcw from .base import _full_covariance_matrices from .base import _log_normal_matrix from .base import BaseMixture from .base import _log_B import numpy as np from scipy.special import psi,betaln from scipy.misc import logsumexp class DPVariationalGaussianMixture(BaseMixture): """ Variational Bayesian Estimation of a Gaussian Mixture with Dirichlet Process This class allows to infer an approximate posterior distribution over the parameters of a Gaussian mixture distribution. The weights distribution follows a Dirichlet Process with attribute alpha. Parameters ---------- n_components : int, defaults to 1. Number of clusters used. init : str, defaults to 'kmeans'. Method used in order to perform the initialization, must be in ['random', 'plus', 'AF_KMC', 'kmeans', 'GMM', 'VBGMM']. reg_covar : float, defaults to 1e-6 In order to avoid null covariances this float is added to the diagonal of covariance matrices. type_init : str, defaults to 'resp'. The algorithm is initialized using this data (responsibilities if 'resp' or means, covariances and weights if 'mcw'). Other parameters ---------------- alpha_0 : float, Optional | defaults to None. The prior parameter on the weight distribution (Beta). A high value of alpha_0 will lead to equal weights, while a low value will allow some clusters to shrink and disappear. Must be greater than 0. If None, the value is set to 1/n_components beta_0 : float, Optional | defaults to None. The precision prior on the mean distribution (Gaussian). Must be greater than 0. If None, the value is set to 1.0 nu_0 : float, Optional | defaults to None. The prior of the number of degrees of freedom on the covariance distributions (Wishart). Must be greater or equal to dim. 
If None, the value is set to dim means_prior : array (dim,), Optional | defaults to None The prior value to compute the value of the means. If None, the value is set to the mean of points_data cov_wishart_prior : type depends on covariance_type, Optional | defaults to None If covariance_type is 'full' type must be array (dim,dim) If covariance_type is 'spherical' type must be float The prior value to compute the value of the precisions. pypcoeff : float | defaults to 0 If 0 the weights are generated according to a Dirichlet Process If >0 and <=1 the weights are generated according to a Pitman-Yor Process. Attributes ---------- name : str The name of the method : 'VBGMM' alpha : array of floats (n_components,2) Contains the parameters of the weight distribution (Beta) beta : array of floats (n_components,) Contains coefficients which are multipied with the precision matrices to form the precision matrix on the Gaussian distribution of the means. nu : array of floats (n_components,) Contains the number of degrees of freedom on the distribution of covariance matrices. _inv_prec : array of floats (n_components,dim,dim) Contains the equivalent of the matrix W described in Bishop's book. It is proportional to cov. _log_det_inv_prec : array of floats (n_components,) Contains the logarithm of the determinant of W matrices. cov : array of floats (n_components,dim,dim) Contains the computed covariance matrices of the mixture. means : array of floats (n_components,dim) Contains the computed means of the mixture. log_weights : array of floats (n_components,) Contains the logarithm of weights of each cluster. iter : int The number of iterations computed with the method fit() convergence_criterion_data : array of floats (iter,) Stores the value of the convergence criterion computed with data on which the model is fitted. 
convergence_criterion_test : array of floats (iter,) | if _early_stopping only Stores the value of the convergence criterion computed with test data if it exists. _is_initialized : bool Ensures that the method _initialize() has been used before using other methods such as score() or predict_log_assignements(). Raises ------ ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']... References ---------- 'Variational Inference for Dirichlet Process Mixtures', D. Blei and M. Jordan """ def __init__(self, n_components=1,init="kmeans",alpha_0=None,beta_0=None, nu_0=None,means_prior=None,cov_wishart_prior=None, reg_covar=1e-6,type_init='resp',n_jobs=1,pypcoeff=0, boost=None): super(DPVariationalGaussianMixture, self).__init__() self.n_components = n_components self.covariance_type = "full" self.init = init self.type_init = type_init self.reg_covar = reg_covar self.boost = boost self.alpha_0 = alpha_0 self.beta_0 = beta_0 self.nu_0 = nu_0 self.pypcoeff = pypcoeff self._means_prior = means_prior self._inv_prec_prior = cov_wishart_prior self.n_jobs = n_jobs self._is_initialized = False self.iter = 0 self.convergence_criterion_data = [] self.convergence_criterion_test = [] self._check_common_parameters() self._check_parameters() if pypcoeff==0: self.name = 'DPGMM' else: self.name = 'PYPGMM' def _check_parameters(self): """ Check the value of the init parameter """ if self.init not in ['random', 'random_sk', 'plus', 'kmeans', 'AF_KMC', 'GMM', 'VBGMM']: raise ValueError("Invalid value for 'init': %s " "'init' should be in " "['random','plus','kmeans','AF_KMC','GMM','VBGMM']" % self.init) if self.pypcoeff < 0 or self.pypcoeff > 1: raise ValueError("Invalid value for 'pypcoeff': %s " "'pypcoeff' should be between 0 and 1" % self.init) if self.boost is not None : if self.boost < 0: raise ValueError("Invalid value for 'boost': %s " "'boost' should be positive" % self.init) if self.init == 'random_sk' and 
self.type_init=='mcw': raise ValueError("random_sk is only compatible with" "type_init = resp") def _initialize(self,points_data,points_test=None): """ This method initializes the Variational Gaussian Mixture by setting the values of the means, the covariances and other specific parameters (alpha, beta, nu) Parameters ---------- points_data : an array (n_points,dim) Data on which the model is fitted. points_test: an array (n_points,dim) | Optional Data used to do early stopping (avoid overfitting) """ n_points,dim = points_data.shape self._check_prior_parameters(points_data) if self.type_init == 'resp': log_assignements = initialize_log_assignements(self.init,self.n_components,points_data, points_test) self._inv_prec = np.empty((self.n_components,dim,dim)) self._log_det_inv_prec = np.empty(self.n_components) self.cov = np.empty((self.n_components,dim,dim)) self.alpha = np.empty((self.n_components,2)) self.log_weights = np.empty(self.n_components) self._step_M(points_data,log_assignements) # Boosting covariance matrices if self.boost is not None: self.cov *= self.boost self._inv_prec *= self.boost self._log_det_inv_prec += dim * np.log(self.boost) elif self.type_init == 'mcw': #Means, covariances and weights means,cov,log_weights = initialize_mcw(self.init,self.n_components,points_data, points_test) self.cov = cov self.means = means self.log_weights = log_weights # Hyper parameters N = np.exp(log_weights) * n_points self.alpha = np.asarray([1 + N, self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0))]).T self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components), self.pypcoeff * np.arange(self.n_components)]).T self.beta = self.beta_0 + N self.nu = self.nu_0 + N # Matrix W self._inv_prec = cov * self.nu[:,np.newaxis,np.newaxis] self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec)) elif self.init == 'user': if self.type_init=='kmeans': self._initialize_cov(points_data) # Hyper parameters N = np.exp(self.log_weights) * n_points self.alpha = 
np.asarray([1 + N, self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0)) ]).T self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components), self.pypcoeff * np.arange(self.n_components)]).T self.beta = self.beta_0 + N self.nu = self.nu_0 + N # Matrix W self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis] self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec)) self._is_initialized = True def _step_E(self, points): """ In this step the algorithm evaluates the responsibilities of each points in each cluster Parameters ---------- points : an array (n_points,dim) Returns ------- log_resp: an array (n_points,n_components) an array containing the logarithm of the responsibilities. log_prob_norm : an array (n_points,) logarithm of the probability of each sample in points """ n_points,dim = points.shape log_gaussian = _log_normal_matrix(points,self.means,self.cov,'full',self.n_jobs) log_gaussian -= 0.5 * dim * np.log(self.nu) digamma_sum = np.sum(psi(.5 * (self.nu - np.arange(0, dim)[:,np.newaxis])),0) log_lambda = digamma_sum + dim * np.log(2) log_prob = self.log_weights + log_gaussian + 0.5 * (log_lambda - dim / self.beta) log_prob_norm = logsumexp(log_prob, axis=1) log_resp = log_prob - log_prob_norm[:,np.newaxis] return log_prob_norm,log_resp def _step_M(self,points,log_resp): """ In this step the algorithm updates the values of the parameters (means, covariances, alpha, beta, nu). Parameters ---------- points : an array (n_points,dim) log_resp: an array (n_points,n_components) an array containing the logarithm of the responsibilities. 
""" n_points,dim = points.shape resp = np.exp(log_resp) # Convenient statistics N = np.sum(resp,axis=0) + 10*np.finfo(resp.dtype).eps #Array (n_components,) X_barre = 1/N[:,np.newaxis] * np.dot(resp.T,points) #Array (n_components,dim) S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs) #Parameters update self.alpha = np.asarray([1.0 + N, self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0))]).T self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components), self.pypcoeff * np.arange(self.n_components)]).T self.beta = self.beta_0 + N self.nu = self.nu_0 + N # Weights update for i in range(self.n_components): if i==0: self.log_weights[i] = psi(self.alpha[i][0]) - psi(np.sum(self.alpha[i])) else: self.log_weights[i] = psi(self.alpha[i][0]) - psi(np.sum(self.alpha[i])) self.log_weights[i] += self.log_weights[i-1] + psi(self.alpha[i-1][1]) - psi(self.alpha[i-1][0]) # Means update means = self.beta_0 * self._means_prior + N[:,np.newaxis] * X_barre self.means = means * np.reciprocal(self.beta)[:,np.newaxis] self.means_estimated = self.means # Covariance update for i in range(self.n_components): diff = X_barre[i] - self._means_prior product = self.beta_0 * N[i]/self.beta[i] * np.outer(diff,diff) self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product det_inv_prec = np.linalg.det(self._inv_prec[i]) self._log_det_inv_prec[i] = np.log(det_inv_prec) self.cov[i] = self._inv_prec[i] / self.nu[i] def _convergence_criterion_simplified(self,points,log_resp,log_prob_norm): """ Compute the lower bound of the likelihood using the simplified Blei and Jordan formula. Can only be used with data which fits the model. Parameters ---------- points : an array (n_points,dim) log_resp: an array (n_points,n_components) an array containing the logarithm of the responsibilities. 
log_prob_norm : an array (n_points,) logarithm of the probability of each sample in points Returns ------- result : float the lower bound of the likelihood """ resp = np.exp(log_resp) n_points,dim = points.shape prec = np.linalg.inv(self._inv_prec) prec_prior = np.linalg.inv(self._inv_prec_prior) lower_bound = np.zeros(self.n_components) for i in range(self.n_components): lower_bound[i] = _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i]) resp_i = resp[:,i:i+1] log_resp_i = log_resp[:,i:i+1] lower_bound[i] -= np.sum(resp_i*log_resp_i) lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i])) result = np.sum(lower_bound) result -= self.n_components * betaln(1,self.alpha_0) result += np.sum(betaln(self.alpha.T[0],self.alpha.T[1])) result -= n_points * dim * 0.5 * np.log(2*np.pi) return result def _convergence_criterion(self,points,log_resp,log_prob_norm): """ Compute the lower bound of the likelihood using the Blei and Jordan formula. The formula cannot be simplified (as it is done in scikit-learn) as we also use it to calculate the lower bound of test points, in this case no simplification can be done. Parameters ---------- points : an array (n_points,dim) log_resp: an array (n_points,n_components) an array containing the logarithm of the responsibilities. 
log_prob_norm : an array (n_points,) logarithm of the probability of each sample in points Returns ------- result : float the lower bound of the likelihood """ resp = np.exp(log_resp) n_points,dim = points.shape # Convenient statistics N = np.sum(resp,axis=0) + 10*np.finfo(resp.dtype).eps #Array (n_components,) X_barre = 1/N[:,np.newaxis] * np.dot(resp.T,points) #Array (n_components,dim) S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs) prec = np.linalg.inv(self._inv_prec) prec_prior = np.linalg.inv(self._inv_prec_prior) lower_bound = np.zeros(self.n_components) for i in range(self.n_components): digamma_sum = np.sum(psi(.5 * (self.nu[i] - np.arange(0, dim)[:,np.newaxis])),0) log_det_prec_i = digamma_sum + dim * np.log(2) - self._log_det_inv_prec[i] #/!\ Inverse #First line lower_bound[i] = log_det_prec_i - dim/self.beta[i] - self.nu[i]*np.trace(np.dot(S[i],prec[i])) diff = X_barre[i] - self.means[i] lower_bound[i] += -self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T)) lower_bound[i] *= 0.5 * N[i] #Second line lower_bound[i] += _log_B(prec_prior,self.nu_0) - _log_B(prec[i],self.nu[i]) resp_i = resp[:,i:i+1] log_resp_i = log_resp[:,i:i+1] lower_bound[i] -= np.sum(resp_i*log_resp_i) lower_bound[i] += 0.5 * (self.nu_0 - self.nu[i]) * log_det_prec_i lower_bound[i] += dim*0.5*(np.log(self.beta_0) - np.log(self.beta[i])) lower_bound[i] += dim*0.5*(1 - self.beta_0/self.beta[i] + self.nu[i]) #Third line without the last term which is not summed diff = self.means[i] - self._means_prior lower_bound[i] += -0.5*self.beta_0*self.nu[i]*np.dot(diff,np.dot(prec[i],diff.T)) lower_bound[i] += -0.5*self.nu[i]*np.trace(np.dot(self._inv_prec_prior,prec[i])) #Terms with alpha lower_bound[i] += (N[i] + 1.0 - self.alpha[i,0]) * (psi(self.alpha[i,0]) - psi(np.sum(self.alpha[i]))) lower_bound[i] += (np.sum(N[i+1::]) + self.alpha_0 - self.alpha[i,1]) * (psi(self.alpha[i,1]) - psi(np.sum(self.alpha[i]))) result = np.sum(lower_bound) result -= self.n_components * 
betaln(1,self.alpha_0) result += np.sum(betaln(self.alpha.T[0],self.alpha.T[1])) result -= n_points * dim * 0.5 * np.log(2*np.pi) return result def _get_parameters(self): return (self.log_weights, self.means, self.cov, self.alpha, self.beta, self.nu) def _set_parameters(self, params,verbose=True): (self.log_weights, self.means, self.cov, self.alpha, self.beta, self.nu )= params # Matrix W self._inv_prec = self.cov * self.nu[:,np.newaxis,np.newaxis] self._log_det_inv_prec = np.log(np.linalg.det(self._inv_prec)) if self.n_components != len(self.means) and verbose: print('The number of components changed') self.n_components = len(self.means) def _limiting_model(self,points): n_points,dim = points.shape log_resp = self.predict_log_resp(points) _,n_components = log_resp.shape exist = np.zeros(n_components) for i in range(n_points): for j in range(n_components): if np.argmax(log_resp[i])==j: exist[j] = 1 idx_existing = np.where(exist==1) log_weights = self.log_weights[idx_existing] means = self.means[idx_existing] cov = self.cov[idx_existing] alpha = self.alpha[idx_existing] beta = self.beta[idx_existing] nu = self.nu[idx_existing] params = (log_weights, means, cov, alpha, beta, nu) return params
14thibea/megamix
megamix/batch/DPGMM.py
Python
apache-2.0
20,644
[ "Gaussian" ]
f4654df2a44bad67b3215e2cfc5bc2c9c8c7bc43e829ac60432b692daa60fbbc
# -*- coding: utf-8 -*- # # mpitest_issue_578_sp.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ This test is called from test_mpitests.py """ import nest import sys HAVE_GSL = nest.ll_api.sli_func("statusdict/have_gsl ::") class TestIssue578(): def test_targets(self): nest.ResetKernel() nest.set_verbosity('M_ALL') # Testing with 2 MPI processes nest.SetKernelStatus( { 'resolution': 0.1, 'total_num_virtual_procs': 2 } ) # Update the SP interval nest.EnableStructuralPlasticity() nest.SetKernelStatus({ 'structural_plasticity_update_interval': 1000., }) growth_curve = { 'growth_curve': "gaussian", 'growth_rate': 0.0001, # Beta (elements/ms) 'continuous': False, 'eta': 0.1, 'eps': 0.7, } structural_p_elements_E = { 'Den_ex': growth_curve, 'Den_in': growth_curve, 'Axon_ex': growth_curve } neuronDict = {'V_m': -60., 't_ref': 5.0, 'V_reset': -60., 'V_th': -50., 'C_m': 200., 'E_L': -60., 'g_L': 10., 'E_ex': 0., 'E_in': -80., 'tau_syn_ex': 5., 'tau_syn_in': 10., 'I_e': 220.} nest.SetDefaults("iaf_cond_exp", neuronDict) neuronsE = nest.Create('iaf_cond_exp', 1, { 'synaptic_elements': structural_p_elements_E}) # synapses synDictE = {'synapse_model': 'static_synapse', 'weight': 3., 'pre_synaptic_element': 'Axon_ex', 'post_synaptic_element': 'Den_ex'} nest.SetKernelStatus({ 'structural_plasticity_synapses': { 'synapseEE': synDictE, } }) try: 
nest.Simulate(200 * 1000) except Exception: print(sys.exc_info()[0]) self.fail("Exception during simulation") # We can not define the regular suite() and runner() functions here, because # it will not show up as failed in the testsuite if it fails. This is # because the test is called from test_mpitests, and the unittest system in # test_mpitests will only register the failing test if we call this test # directly. if HAVE_GSL: mpitest = TestIssue578() mpitest.test_targets() else: print("Skipping because GSL is not available")
lekshmideepu/nest-simulator
testsuite/pytests/test_sp/mpitest_issue_578_sp.py
Python
gpl-2.0
3,178
[ "Gaussian" ]
b9e91947f2d9637ee63ae667ba830a9bd3400a6f7f0591b3b7dc073c76b299ff
import os from __main__ import vtk, qt, ctk, slicer from slicer.ScriptedLoadableModule import * import logging # # DeveloperToolsForExtensions # class DeveloperToolsForExtensions(ScriptedLoadableModule): """Uses ScriptedLoadableModule base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def __init__(self, parent): ScriptedLoadableModule.__init__(self, parent) self.parent.title = "Developer Tools For Extensions" self.parent.categories = ["Developer Tools"] self.parent.dependencies = [] self.parent.contributors = ["Francois Budin (UNC)"] # replace with "Firstname Lastname (Organization)" self.parent.helpText = """ This extension gives the developers easy access to convenient functions that are available in Slicer \ but difficult to access. """ self.parent.acknowledgementText = """ This work is supported by NA-MIC and the Slicer Community. See <a>http://www.slicer.org</a> for details. """ # # DeveloperToolsForExtensionsWidget # class DeveloperToolsForExtensionsWidget(ScriptedLoadableModuleWidget): """Uses ScriptedLoadableModuleWidget base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def setup(self): ScriptedLoadableModuleWidget.setup(self) self.timeout = 3000 self.extensionFileDialog = None self.moduleFileDialog = None icon = self.parent.style().standardIcon(qt.QStyle.SP_ArrowForward) iconSize = qt.QSize(22, 22) def createToolButton(text): tb = qt.QToolButton() tb.text = text tb.icon = icon font = tb.font font.setBold(True) font.setPixelSize(14) tb.font = font tb.iconSize = iconSize tb.toolButtonStyle = qt.Qt.ToolButtonTextBesideIcon tb.autoRaise = True return tb # Instantiate and connect widgets ... 
# # Parameters Area # parametersCollapsibleButton = ctk.ctkCollapsibleButton() parametersCollapsibleButton.text = "Developer tools for extensions" self.layout.addWidget(parametersCollapsibleButton) # Layout within the dummy collapsible button parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton) # Select extension to install self.extensionSelector = createToolButton("Install extension archive") self.extensionSelector.setToolTip("Select extension archive that\ has been locally created or manually downloaded") parametersFormLayout.addRow(self.extensionSelector) # Select script module to load self.moduleSelector = createToolButton("Load module") self.moduleSelector.setToolTip("Select a module you want to load in Slicer") parametersFormLayout.addRow(self.moduleSelector) # connections self.extensionSelector.connect('clicked(bool)', self.onExtensionSelect) self.moduleSelector.connect('clicked(bool)', self.onModuleSelect) # Add vertical spacer self.layout.addStretch(1) # Create logic self.logic = DeveloperToolsForExtensionsLogic() def cleanup(self): pass def customDialog(self, filter_name, okCaption, windowTitle ): dialog = qt.QFileDialog(self.parent) dialog.options = dialog.DontUseNativeDialog dialog.acceptMode = dialog.AcceptOpen dialog.setNameFilter(filter_name) dialog.setLabelText(qt.QFileDialog.Accept, okCaption) dialog.setWindowTitle(windowTitle) return dialog def onModuleSelect(self): if not self.moduleFileDialog: self.moduleFileDialog = self.customDialog("Script module (*.py)", "Load", "Select module to load") self.moduleFileDialog.connect("fileSelected(QString)", self.onModuleFileSelected) self.moduleFileDialog.show() def onModuleFileSelected(self, fileName): self.moduleFileDialog.hide() value = qt.QMessageBox.question(slicer.util.mainWindow(), "", "Do you want to add module path to permanent search paths?", qt.QMessageBox.Yes | qt.QMessageBox.No) permanent = False if value == qt.QMessageBox.Yes: permanent = True try: 
self.logic.addModule(fileName,permanent) slicer.util.delayDisplay("Module "+fileName+" loaded", self.timeout) except Exception as e: logging.critical(e.message) slicer.util.errorDisplay(e.message, self.timeout) def onExtensionSelect(self): if not self.extensionFileDialog: self.extensionFileDialog = self.customDialog("Extension archive (*.zip *.tar.gz)", "Install", "Select extension to install") self.extensionFileDialog.connect("fileSelected(QString)", self.onExtensionFileSelected) self.extensionFileDialog.show() def onExtensionFileSelected(self, fileName): self.extensionFileDialog.hide() try: self.logic.installExtension(fileName) value=qt.QMessageBox.question(slicer.util.mainWindow(), "", "Are you sure you want to restart?", qt.QMessageBox.Ok | qt.QMessageBox.No) # http://qt-project.org/doc/qt-4.8/qmessagebox.html#StandardButton-enum if value == qt.QMessageBox.Ok: slicer.util.restart() except Exception as e: slicer.util.errorDisplay(e.message, self.timeout) # # DeveloperToolsForExtensionsLogic # class DeveloperToolsForExtensionsLogic(ScriptedLoadableModuleLogic): """This class should implement all the actual computation done by your module. The interface should be such that other python code can import this class and make use of the functionality without requiring an instance of the Widget. Uses ScriptedLoadableModuleLogic base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def PlatformCheck(self, filename): """Compare extension platform with current platform. 
""" name = os.path.basename(filename) try: extensionrepositoryRevision, extensionos, extensionarch = name.split('-')[:3] except: raise Exception('extension name does not match expected format \ (Revision-OS-Arch-NameAndExtension)') for var in ('repositoryRevision', 'os', 'arch'): currentVar = getattr(slicer.app, var) extensionVar = locals()['extension'+var] if extensionVar != currentVar: raise Exception(var+": "+currentVar+"(Slicer) "+extensionVar+"(extension)") return True # http://stackoverflow.com/questions/17277566/check-os-path-isfilefilename-with-case-sensitive-in-python def CheckFileExistsCaseSensitive(self, filename): """Verifies that the given file exists. Default python function to do so (\ os.path.isfile(filename) ) is not case-sensitive. """ if not os.path.isfile(filename): return False # exit early, file does not exist (not even with wrong case) directory, filename = os.path.split(filename) return filename in os.listdir(directory) def installExtension(self, filename): """ Install a given extension, from an archive, in Slicer """ try: self.CheckFileExistsCaseSensitive(filename) except: raise Exception('Extension file does not exist') try: self.PlatformCheck(filename) except: raise Exception('Extension file for wrong platform') logging.info('Extension installation process started') val = slicer.app.extensionsManagerModel().installExtension(filename) logging.info('Extension installation process completed') return val # From ExtensionWizard.py in Slicer def _settingsList(self, settings, key): """ Returns a settings value as a list (even if empty or a single value) """ value = settings.value(key) if isinstance(value, basestring): return [value] return [] if value is None else value # From ExtensionWizard.py in Slicer def addModule(self, fileName, permanent): """ Loads a module in the Slicer factory while Slicer is running """ logging.info('Module addition process started') # Determine which modules in above are not already loaded factory = 
slicer.app.moduleManager().factoryManager() myModule = type('moduleType', (), {}) myModule.dirPath = os.path.dirname(fileName) myModule.baseName = os.path.basename(fileName) myModule.key, myModule.fileExtension = os.path.splitext(myModule.baseName) if factory.isLoaded(myModule.key): raise Exception("Abort: Module already loaded") if permanent: # Add module(s) to permanent search paths, if requested settings = slicer.app.revisionUserSettings() rawSearchPaths = list(self._settingsList(settings, "Modules/AdditionalPaths")) searchPaths = [qt.QDir(path) for path in rawSearchPaths] modified = False rawPath = myModule.dirPath path = qt.QDir(rawPath) if path not in searchPaths: searchPaths.append(path) rawSearchPaths.append(rawPath) modified = True if modified: settings.setValue("Modules/AdditionalPaths", rawSearchPaths) # Register requested module(s) factory.registerModule(qt.QFileInfo(fileName)) if not factory.isRegistered(myModule.key): raise Exception("Abort: Failed to register module %s", myModule.key) # Instantiate and load requested module(s) if not factory.loadModules([myModule.key]): raise Exception("Abort: The module factory manager reported an error. \ One or more of the requested module(s) and/or \ dependencies thereof may not have been loaded.") logging.info('Module addition process completed') return True class DeveloperToolsForExtensionsTest(ScriptedLoadableModuleTest): """ This is the test case for your scripted module. Uses ScriptedLoadableModuleTest base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def runTest(self): # """Run as few or as many tests as needed here. 
# """ self.test_PlatformCheck1() self.test_PlatformCheck2() self.test_CheckFileExistsCaseSensitive1() self.test_CheckFileExistsCaseSensitive2() # To be debugged->Uninstall function seems to create issues with Python when restarting Slicer # self.test_installExtension() def test_PlatformCheck1(self): """Verifies that platformCheck() works appropriately if filename with correct platform given. """ testName = "PlatformCheck1" self.delayDisplay("Starting the test: "+testName) absoluteDummyPath = "/test/hello" dummyExtensionName = "myExtension" dummyExtensionSuffix = "svn224-2014-07-28.tar.gz" # Extension does not matter for this test myCurrentOS = slicer.app.os myCurrentArch = slicer.app.arch myCurrentRev = slicer.app.repositoryRevision myFileName = "-".join([myCurrentRev, myCurrentOS, myCurrentArch, dummyExtensionName, dummyExtensionSuffix]) myAbsoluteFileName = "/".join([absoluteDummyPath, myFileName]) logging.info("Check platform with given dummy file name:%s", myAbsoluteFileName) logic = DeveloperToolsForExtensionsLogic() self.assertTrue(logic.PlatformCheck(myAbsoluteFileName)) self.delayDisplay(testName+': Test passed!') def test_PlatformCheck2(self): """Verifies that plafformCheck() raises exceptions when the given file does not correspond to the current platform. Tests with wrong OS, wrong architecture and wrong revision number. 
""" testName = "PlatformCheck2" self.delayDisplay("Starting the test: "+testName) absoluteDummyPath = "/test/hello" dummyExtensionName = "myExtension" dummyExtensionSuffix = "svn224-2014-07-28.tar.gz" # Extension does not matter for this test myCurrentOS = slicer.app.os myCurrentArch = slicer.app.arch myCurrentRev = slicer.app.repositoryRevision logic = DeveloperToolsForExtensionsLogic() # check that an exception is raised for wrong OS listOS = ['linux', 'macosx', 'win'] listOS.remove(myCurrentOS) for testOS in listOS: myFileName = "-".join([myCurrentRev, testOS, myCurrentArch, dummyExtensionName, dummyExtensionSuffix]) myAbsoluteFileName = "/".join([absoluteDummyPath,myFileName]) logging.info("Check platform with given dummy file name:%s", myAbsoluteFileName) with self.assertRaises(Exception) as cm: logic.PlatformCheck(myAbsoluteFileName) e = cm.exception self.assertEqual(e.message, "os: "+myCurrentOS+"(Slicer) "+testOS+"(extension)") # Check that False is returned for wrong revision number badRev = "xxxxx" myFileName = "-".join([badRev, myCurrentOS, myCurrentArch, dummyExtensionName, dummyExtensionSuffix]) myAbsoluteFileName = "/".join([absoluteDummyPath, myFileName]) logging.info("Check platform with given dummy file name:%s", myAbsoluteFileName) with self.assertRaises(Exception) as cm: logic.PlatformCheck(myAbsoluteFileName) e = cm.exception self.assertEqual(e.message, "repositoryRevision: "+myCurrentRev+"(Slicer) "+badRev+"(extension)") # Check that False is returned for wrong architecture detected badArchitecture = "badArch" myFileName = "-".join([myCurrentRev, myCurrentOS, badArchitecture, dummyExtensionName, dummyExtensionSuffix]) myAbsoluteFileName = "/".join([absoluteDummyPath, myFileName]) logging.info("Check platform with given dummy file name:%s", myAbsoluteFileName) with self.assertRaises(Exception) as cm: logic.PlatformCheck(myAbsoluteFileName) e = cm.exception self.assertEqual(e.message, "arch: "+myCurrentArch+"(Slicer) 
"+badArchitecture+"(extension)") self.delayDisplay(testName+': Test passed!') def test_CheckFileExistsCaseSensitive1(self): """Checks that CheckFileExistsCaseSensitive returns False if given file does not exist. """ testName = "CheckFileExistsCaseSensitive1" self.delayDisplay("Starting the test: "+testName) logic = DeveloperToolsForExtensionsLogic() slicerPath = slicer.app.applicationFilePath() fileDoesNotExists = os.path.join(slicerPath, "fileThatDoesntExist") logging.info("Check that the given file which does not exist is not found:%s", fileDoesNotExists) self.assertTrue(not logic.CheckFileExistsCaseSensitive(fileDoesNotExists)) self.delayDisplay(testName+': Test passed!') def test_CheckFileExistsCaseSensitive2(self): """Checks that CheckFileExistsCaseSensitive returns True if given file exists. """ testName = "CheckFileExistsCaseSensitive2" self.delayDisplay("Starting the test: "+testName) logic = DeveloperToolsForExtensionsLogic() slicerPath = slicer.app.applicationFilePath() logging.info("Check that the given file is found:%s", slicerPath) self.assertTrue(logic.CheckFileExistsCaseSensitive(slicerPath)) self.delayDisplay(testName+': Test passed!') def _install_dummy_extension(self, myExtensionName): logic = DeveloperToolsForExtensionsLogic() myCurrentOS = slicer.app.os myCurrentArch = slicer.app.arch myCurrentRev = slicer.app.repositoryRevision # The only archive format we are sure we have is zip, through the python interface. # Since this format works on the 3 platforms we support (Windows, MacOS, and linux), # we use this format instead of 'tar.gz' on linux and MacOS. 
extenstion = ".zip" myExtensionFileRootName = "-".join([myCurrentRev, myCurrentOS, myCurrentArch, myExtensionName]) tempPath = slicer.app.temporaryPath currentFilePath = os.path.dirname(os.path.realpath(__file__)) inputDescriptionFile = os.path.join(currentFilePath, "Testing", "Python", "myDummyExtension.s4ext") myCurrentOS = slicer.app.os myCurrentVersion = slicer.app.applicationVersion versionNoDate = myCurrentVersion.split("-") # Get version number without date versionSplit = versionNoDate[0].split(".") # Split major.minor.patch # Create a variable containing a string of the form "Slicer-4.4" slicerVersionDirectory = "Slicer-"+versionSplit[0]+"."+versionSplit[1] if myCurrentOS == "macosx": internalPackagePath = os.path.join(myExtensionFileRootName, "Slicer.app", "Contents", "Extensions-"+myCurrentRev, myExtensionName, "share", slicerVersionDirectory) else: # "win" or linux internalPackagePath = os.path.join(myExtensionFileRootName,"share", slicerVersionDirectory) import errno try: pathToCreate = os.path.join(tempPath, internalPackagePath) logging.info("Directory to create for test extension: "+pathToCreate) os.makedirs(pathToCreate) except OSError as exception: if exception.errno != errno.EEXIST: # We report error except if it is because directory already exists logging.critical("Error while creating extension directory in temp folder") return False logging.info("Extension directory already exists") try: import shutil outputDescriptionFile = os.path.join(pathToCreate, myExtensionName+".s4ext") shutil.copyfile(inputDescriptionFile, outputDescriptionFile) myExtensionFileName = myExtensionFileRootName+extenstion myExtensionInputDirectory = os.path.join(tempPath, myExtensionFileRootName) outputExtensionFileName = os.path.join(tempPath, myExtensionFileName) logging.info("Output zipped file name:"+outputExtensionFileName) logging.info("Directory to zip:"+myExtensionInputDirectory) slicer.vtkMRMLApplicationLogic().Zip(outputExtensionFileName, myExtensionInputDirectory) 
except Exception as exception: logging.critical(exception) return False if logic.installExtension(outputExtensionFileName): slicer.app.extensionsManagerModel().scheduleExtensionForUninstall(myExtensionName) return True return False def test_installExtension(self): """ Downloads and install a fake package. After the installation, is schedule the extension for uninstall as it cannot uninstall it right away. """ testName = "CheckIfInstallTestExtensionWorks" myTestExtension = "myTestExtension" # In case the extension is already installed, skip test and schedule for removal. if slicer.app.extensionsManagerModel().isExtensionInstalled(myTestExtension): slicer.app.extensionsManagerModel().scheduleExtensionForUninstall(myTestExtension) logging.info("Extension already installed. Scheduled to be removed.") self.delayDisplay('Test extension scheduled to be removed. Test skipped. Restart Slicer and run it again.', 3000) return self.delayDisplay("Starting the test: "+testName) # self.assertTrue(self._install_dummy_extension(myTestExtension)) self.delayDisplay(testName+': Test passed!')
lassoan/SlicerDeveloperToolsForExtensions
DeveloperToolsForExtensions/DeveloperToolsForExtensions.py
Python
apache-2.0
20,653
[ "VTK" ]
fab1a583ad772ea06e044addcd570d246a156b3b80bc0196be2f26a83bf91393
# Copyright 2011 Nicholas Bray # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ 'TypeDispatcher', 'defaultdispatch', 'dispatch', 'TypeDispatchError', 'TypeDispatchDeclarationError' ] import inspect def flattenTypesInto(l, result): for child in l: if isinstance(child, (list, tuple)): flattenTypesInto(child, result) else: if not isinstance(child, type): raise TypeDispatchDeclarationError, "Expected a type, got %r instead." % child result.append(child) def dispatch(*types): def dispatchF(f): def dispatchWrap(*args, **kargs): return f(*args, **kargs) dispatchWrap.__original__ = f dispatchWrap.__dispatch__ = [] flattenTypesInto(types, dispatchWrap.__dispatch__) return dispatchWrap return dispatchF def defaultdispatch(f): def defaultWrap(*args, **kargs): return f(*args, **kargs) defaultWrap.__original__ = f defaultWrap.__dispatch__ = (None,) return defaultWrap def dispatch__call__(self, p, *args): t = type(p) table = self.__typeDispatchTable__ func = table.get(t) if func is None: # Search for a matching superclass # This should occur only once per class. 
if self.__concrete__: possible = (t,) else: possible = t.mro() for supercls in possible: func = table.get(supercls) if func is not None: break elif self.__namedispatch__: # The emulates "visitor" dispatch, to allow for evolutionary refactoring name = self.__nameprefix__ + t.__name__ func = type(self).__dict__.get(name) if func is not None: break # default if func is None: func = table.get(None) # Cache the function that we found table[t] = func return func(self, p, *args) class TypeDispatchError(Exception): pass class TypeDispatchDeclarationError(Exception): pass def exceptionDefault(self, node, *args): raise TypeDispatchError, "%r cannot handle %r\n%r" % (type(self), type(node), node) def inlineAncestor(t, lut): if hasattr(t, '__typeDispatchTable__'): # Search for types that haven't been defined, yet. for k, v in t.__typeDispatchTable__.iteritems(): if k not in lut: lut[k] = v class typedispatcher(type): def __new__(self, name, bases, d): lut = {} restore = {} # Build the type lookup table from the local declaration for k, v in d.iteritems(): if hasattr(v, '__dispatch__') and hasattr(v, '__original__'): types = v.__dispatch__ original = v.__original__ for t in types: if t in lut: raise TypeDispatchDeclarationError, "%s has declared with multiple handlers for type %s" % (name, t.__name__) else: lut[t] = original restore[k] = original # Remove the wrappers from the methods d.update(restore) # Search and inline dispatch tables from the MRO for base in bases: for t in inspect.getmro(base): inlineAncestor(t, lut) if None not in lut: raise TypeDispatchDeclarationError, "%s has no default dispatch" % (name,) d['__typeDispatchTable__'] = lut return type.__new__(self, name, bases, d) class TypeDispatcher(object): __metaclass__ = typedispatcher __dispatch__ = dispatch__call__ __call__ = dispatch__call__ exceptionDefault = defaultdispatch(exceptionDefault) __concrete__ = False __namedispatch__ = False __nameprefix__ = 'visit'
ncbray/pystream
bin/util/typedispatch.py
Python
apache-2.0
3,787
[ "VisIt" ]
cef3a4ed03cd03d188ea5633194738c9ca102df22ea2b01a2fe08030532161ce
import os import shutil import pytest from eppy import modeleditor from eppy.modeleditor import IDF from eppy.runner.run_functions import paths_from_version, EnergyPlusRunError THIS_DIR = os.path.dirname(os.path.abspath(__file__)) RESOURCES_DIR = os.path.join(THIS_DIR, os.pardir, 'resources') IDD_FILES = os.path.join(RESOURCES_DIR, 'iddfiles') IDF_FILES = os.path.join(RESOURCES_DIR, 'idffiles') @pytest.mark.xfail def test_reproduce_run_issue(): """This is for use as a debugging tool. Add the files used in the run as reported/provided by a user. Make any additional changes required for reproducing/diagnosing the issue. """ # update the following four lines if necessary ep_version = "8-9-0" idffile = "V8_9/smallfile.idf" iddfile = "Energy+V8_9_0.idd" epwfile = "USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw" _, eplus_home = paths_from_version(ep_version) idfname = os.path.join(IDF_FILES, idffile) iddfile = os.path.join(IDD_FILES, iddfile) epwfile = os.path.join(eplus_home, "WeatherData", epwfile) modeleditor.IDF.setiddname(iddfile, testing=True) idf = IDF(idfname, epwfile) # make any changes to the IDF here try: # Add any additional `run` kwargs here # `ep_version` kwarg is required due to buggy test isolation idf.run(output_directory="test_dir", ep_version=ep_version) # Add any tests for expected/unexpected outputs here except Exception: # Add any tests for expected/unexpected exceptions here raise finally: shutil.rmtree("test_dir", ignore_errors=True)
jamiebull1/eppy
eppy/tests/test_reproduce_bugs.py
Python
mit
1,615
[ "EPW" ]
21b6069c21bfe721c3e9e1788a5e2d7dda3cc73775c8133cc73d65975283d8e8
#!/usr/bin/env python """Module to create concatemer per genome of orthologs, create a phylogenetic tree and deduce taxa from that tree.""" from Bio import AlignIO, Phylo, SeqIO from shared import create_directory, parse_options, extract_archive_of_files, create_archive_of_files from select_taxa import select_genomes_by_ids from versions import DNADIST, NEIGHBOR from subprocess import Popen, PIPE, STDOUT import logging as log import matplotlib import os.path import shutil import sys import tempfile __author__ = "Tim te Beek" __copyright__ = "Copyright 2011, Netherlands Bioinformatics Centre" __license__ = "MIT" def coding_regions_per_genome(run_dir, trimmed_sicos): """Create a DNA file per genome containing all aligned & trimmed SICO genes als individual genes.""" concatemer_dir = create_directory('coding_regions_per_genome', inside_dir=run_dir) log.info('Creating concatemers from {0} SICOs'.format(len(trimmed_sicos))) # Collections both for output files and their write handles, which will be reused for each SICO coding_region_files = [] write_handles = {} # Loop over trimmed sico files to append each sequence to the right concatemer for trimmed_sico in trimmed_sicos: for seqr in SeqIO.parse(trimmed_sico, 'fasta'): # Sample header line: >58191|NC_010067.1|YP_001569097.1|COG4948MR|core project_id = seqr.id.split('|')[0] # Try to retrieve write handle from dictionary of cached write handles per genome write_handle = write_handles.get(project_id) # If not found, create & store write handle on demand if not write_handle: # Build up output file path for trimmed SICO genes per genome coding_region_file = os.path.join(concatemer_dir, project_id + '.coding-regions.ffn') coding_region_files.append(coding_region_file) # Open write handle write_handle = open(coding_region_file, mode='w') write_handles[project_id] = write_handle # Write sequence record to coding-regions file SeqIO.write(seqr, write_handle, 'fasta') # Close genomes trimmed concatemer write handles for 
write_handle in write_handles.values(): write_handle.close() log.info('Created %i genome coding regions files', len(coding_region_files)) return sorted(coding_region_files) def concatemer_per_genome(run_dir, genome_coding_regions_files): """Create a concatemer DNA file per genome containing all aligned & trimmed SICO genes.""" concatemer_dir = create_directory('concatemers', inside_dir=run_dir) # Collection of output filenames concatemer_files = [] # Loop over genome coding regions files to create concatemer of each for coding_region_file in genome_coding_regions_files: # Determine output file name filename = os.path.split(coding_region_file)[1] basename = filename[:filename.find('.coding-regions')] concatemer_file = os.path.join(concatemer_dir, basename + '.concatemer.fna') concatemer_files.append(concatemer_file) # Copy ACTG content from coding regions file to concatemer with open(coding_region_file) as read_handle: with open(concatemer_file, mode='w') as write_handle: # Write out single concatemer header write_handle.write('> {0}|trimmed concatemer\n'.format(basename)) # Copy over all lines that are not header lines (do not start with '>') for line in read_handle: # Skip header lines if not line.startswith('>'): write_handle.write(line) log.info('Created %i genome concatemers', len(concatemer_files)) return sorted(concatemer_files) def create_super_concatemer(concatemer_files, destination_path): """Concatenate individual genome concatemers into a single super-concatemer for easy import into MEGA viewer.""" with open(destination_path, mode='w') as write_handle: for concatemer in concatemer_files: seqr = SeqIO.read(concatemer, 'fasta') SeqIO.write(seqr, write_handle, 'fasta') def _run_dna_dist(run_dir, aligned_file): """Run dnadist to calculate distances between individual strains in a distance matrix, as input for neighbor.""" # Run calculations inside a directory dnadist_dir = create_directory('dnadist/', inside_dir=run_dir) # Read alignment file alignment = 
AlignIO.read(aligned_file, 'fasta') # Convert alignment in to proper input file for dnadist according to specification nr_of_species = len(alignment) nr_of_sites = len(alignment[0]) infile = os.path.join(dnadist_dir, 'infile') with open(infile, mode='w') as write_handle: write_handle.write(' {0} {1}\n'.format(nr_of_species, nr_of_sites)) for seq_record in alignment: name = seq_record.id.split('|')[0] name = name if len(name) < 10 else name[:10] write_handle.write('{0:10}{1}\n'.format(name, seq_record.seq)) # Actually run the dnadist program in the correct directory, and send input to it for the first prompt process = Popen(DNADIST, cwd=dnadist_dir, stdin=PIPE, stdout=PIPE, stderr=STDOUT) process.communicate(input='Y\n') # Retrieve outputfile outfile = os.path.join(dnadist_dir, 'outfile') assert os.path.exists(outfile) and 0 < os.path.getsize(outfile), outfile + ' should exist with some content now' return outfile def _run_neighbor(run_dir, distance_file): """Run neighbor to generate a tree of the distances in the distance file, and return the generated tree file.""" neighbor_dir = create_directory('neighbor', inside_dir=run_dir) # Copy outfile from dnadist to infile inside neighbor_dir shutil.copy(distance_file, os.path.join(neighbor_dir, 'infile')) # Actually run neighbor process = Popen(NEIGHBOR, cwd=neighbor_dir, stdin=PIPE, stdout=PIPE, stderr=STDOUT) process.communicate(input='N\nY\n') # Retrieve newick tree file treefile = os.path.join(neighbor_dir, 'outtree') assert os.path.exists(treefile) and 0 < os.path.getsize(treefile), treefile + ' should exist with some content now' return treefile def _fix_misinterpreted_names(tree): """Bio.Phylo.read(file, 'newick') misinterprets numerical names as confidence scores. 
Fix that here in place.""" for leaf in tree.get_terminals(): if leaf.name == None and leaf.confidence != None: leaf.name = str(float(leaf.confidence)) leaf.confidence = None def _read_taxa_from_tree(tree_file): """Read tree_file in Newick format to identify the first two clades that split up this tree and their leafs.""" # Parse tree using BioPython, which interprets the GenBank Project IDs as confidence scores, but that'll do for now. phylo_tree = Phylo.read(tree_file, 'newick') _fix_misinterpreted_names(phylo_tree) # Of the full tree retrieve the clades from the root clade, expecting exactly two distinct clades after UPGMA clades = phylo_tree.clade.clades assert len(clades) == 2, 'Expected two clades as child of tree\'s first clade, but was {0}'.format(len(clades)) # Get all the leafs for the above two clades in a similar format to the genome_ids clade_one = sorted(leaf.name for leaf in clades[0].get_terminals()) clade_two = sorted(leaf.name for leaf in clades[1].get_terminals()) return clade_one, clade_two def visualize_tree(super_tree_file, id_to_name_map, tree_output): """Visualize the phylogenetic tree encoded in the Newick format super_tree_file, and write graphic to ascii_tree.""" # Draw phylogenetic tree tree = Phylo.read(super_tree_file, 'newick') _fix_misinterpreted_names(tree) for leaf in tree.get_terminals(): # Wrapped long genome names overlap when displayed. Maybe fix this by truncating first word to first letter+'.' 
organism_name = id_to_name_map.get(leaf.name, '').replace(' ', '\n', 1) leaf.name = '{0} {1}'.format(leaf.name, organism_name) # Ascertain we're using the correct display configuration matplotlib.use('Agg') # Set figure size here to large values, to see if this would solve the problem of truncated genome names import pylab pylab.figure(figsize=(12, 8)) # The below code works when installing python-networkx on ubuntu Phylo.draw(tree, do_show=False) # Save as file pylab.savefig(tree_output, format='pdf') # Print ascii tree, when you can't get visualization to work properly using draw_graphviz # Phylo.draw_graphviz(tree, prog = 'neato') # with open(tree_output, mode = 'w') as write_handle: # Phylo.draw_ascii(tree, file = write_handle, column_width = 120) def main(args): """Main function called when run from command line or as part of pipeline.""" usage = """ Usage: concatenate_orthologs.py --orthologs-zip=FILE archive of orthologous genes in FASTA format --coding-regions=FILE destination file path archive of trimmed orthologous coding regions per genomes --concatemer=FILE destination file path for super-concatemer of all genomes --taxon-a=FILE destination file path for genome IDs for taxon A --taxon-b=FILE destination file path for genome IDs for taxon B --tree=FILE destination file path for tree visualization """ options = ['orthologs-zip', 'coding-regions', 'concatemer', 'taxon-a', 'taxon-b', 'tree'] orthologs_zip, target_coding_regions, target_concat_file, target_taxon_a, target_taxon_b, target_tree = \ parse_options(usage, options, args) # Run filtering in a temporary folder, to prevent interference from simultaneous runs run_dir = tempfile.mkdtemp(prefix='concatemer_tree_') # Extract files from zip archive temp_dir = create_directory('orthologs', inside_dir=run_dir) ortholog_files = extract_archive_of_files(orthologs_zip, temp_dir) # Separate out orthologs per genome to create trimmed coding region files per genome genome_coding_regions_files = 
coding_regions_per_genome(run_dir, ortholog_files) create_archive_of_files(target_coding_regions, genome_coding_regions_files) # Concatenate coding region files per genome concatemer_files = concatemer_per_genome(run_dir, genome_coding_regions_files) # Create super concatemer create_super_concatemer(concatemer_files, target_concat_file) # Determine the taxa present in the super concatemer tree by building a phylogenetic tree from genome concatemer and # reading genome ids in the two largest clades. super_distance_file = _run_dna_dist(run_dir, target_concat_file) super_tree_file = _run_neighbor(run_dir, super_distance_file) genome_ids_a, genome_ids_b = _read_taxa_from_tree(super_tree_file) # Map Project IDs to Organism names id_to_name_map = dict((gid, genome['Organism/Name']) for gid, genome in select_genomes_by_ids(genome_ids_a + genome_ids_b).iteritems()) # Write Project IDs and Organism Names to files, with a fallback to genome_id for external genome with open(target_taxon_a, mode='w') as write_handle: for genome_id in genome_ids_a: write_handle.write('{id}\t{name}\n'.format(id=genome_id, name=id_to_name_map.get(genome_id, genome_id))) with open(target_taxon_b, mode='w') as write_handle: for genome_id in genome_ids_b: write_handle.write('{id}\t{name}\n'.format(id=genome_id, name=id_to_name_map.get(genome_id, genome_id))) # Visualize tree visualize_tree(super_tree_file, id_to_name_map, target_tree) # Remove unused files to free disk space shutil.rmtree(run_dir) # Exit after a comforting log message log.info('Produced: \n%s\n%s\n%s\n%s\n%s', target_coding_regions, target_concat_file, target_taxon_a, target_taxon_b, target_tree) if __name__ == '__main__': main(sys.argv[1:])
ODoSE/odose.nl
concatemer_tree.py
Python
mit
12,074
[ "Biopython" ]
07f963cebd5dd7269a96e2fcd8c81534fbb261be5dd0569bc92e1df50b0d1bdb
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2013, 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Unit tests for workflows.""" from __future__ import absolute_import import random import time import logging from ..registry import WorkflowsRegistry from flask.ext.registry import ImportPathRegistry from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite TEST_PACKAGES = [ 'invenio.modules.*', 'invenio.modules.workflows.testsuite', ] class WorkflowTasksTestCase(InvenioTestCase): """ Workflow class for testing.""" def create_registries(self): """Create registries for testing.""" self.app.extensions['registry']['workflows.tests'] = \ ImportPathRegistry(initial=TEST_PACKAGES) self.app.extensions['registry']['workflows'] = \ WorkflowsRegistry( 'workflows', app=self.app, registry_namespace='workflows.tests' ) self.app.extensions['registry']['workflows.actions'] = \ WorkflowsRegistry( 'actions', app=self.app, registry_namespace='workflows.tests' ) def cleanup_registries(self): """Clean registries for testing.""" del self.app.extensions['registry']['workflows.tests'] del self.app.extensions['registry']['workflows'] del self.app.extensions['registry']['workflows.actions'] class WorkflowTasksTestAPI(WorkflowTasksTestCase): """ Test basic workflow API.""" def setUp(self): """Setup 
tests.""" self.create_registries() self.test_data = {} self.id_workflows = [] self.recxml = """<?xml version="1.0" encoding="UTF-8"?> <OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"> <responseDate>2013-04-03T13:56:49Z</responseDate> <request verb="ListRecords" from="2013-03-25" metadataPrefix="arXiv" set="physics:astro-ph">http://export.arxiv.org/oai2</request> <ListRecords> <record> <header> <identifier>oai:arXiv.org:0801.3931</identifier> <datestamp>2013-03-26</datestamp> <setSpec>physics:astro-ph</setSpec> </header> <metadata> <arXiv xmlns="http://arxiv.org/OAI/arXiv/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://arxiv.org/OAI/arXiv/ http://arxiv.org/OAI/arXiv.xsd"> <id>0801.3931</id><created>2008-01-25</created><authors><author><keyname>Manos</keyname><forenames>T.</forenames></author><author><keyname>Athanassoula</keyname><forenames>E.</forenames></author></authors><title>Dynamical study of 2D and 3D barred galaxy models</title><categories>astro-ph</categories><comments>8 pages, 3 figures, to appear in the proceedings of the international conference &quot;Chaos in Astronomy&quot;, Athens, Greece (talk contribution)</comments><journal-ref>Chaos in Astronomy Astrophysics and Space Science Proceedings 2009, pp 115-122</journal-ref><doi>10.1007/978-3-540-75826-6_11</doi><abstract> We study the dynamics of 2D and 3D barred galaxy analytical models, focusing on the distinction between regular and chaotic orbits with the help of the Smaller ALigment Index (SALI), a very powerful tool for this kind of problems. We present briefly the method and we calculate the fraction of chaotic and regular orbits in several cases. In the 2D model, taking initial conditions on a Poincar\'{e} $(y,p_y)$ surface of section, we determine the fraction of regular and chaotic orbits. 
In the 3D model, choosing initial conditions on a cartesian grid in a region of the $(x, z, p_y)$ space, which in coordinate space covers the inner disc, we find how the fraction of regular orbits changes as a function of the Jacobi constant. Finally, we outline that regions near the $(x,y)$ plane are populated mainly by regular orbits. The same is true for regions that lie either near to the galactic center, or at larger relatively distances from it. </abstract></arXiv> </metadata> </record> </ListRecords> </OAI-PMH> """ def tearDown(self): """ Clean up created objects.""" from invenio.modules.workflows.models import Workflow Workflow.get(Workflow.module_name == "unit_tests").delete() self.cleanup_registries() def test_halt(self): """Test halt task.""" from invenio.modules.workflows.registry import workflows from invenio.modules.workflows.api import start from invenio.modules.workflows.engine import WorkflowStatus from invenio.modules.workflows.models import (BibWorkflowObjectLog, ObjectVersion) halt_engine = lambda obj, eng: eng.halt("Test") class HaltTest(object): workflow = [halt_engine] workflows['halttest'] = HaltTest data = [set(('somekey', 'somevalue'))] eng = start('halttest', data, module_name="unit_tests") idx, obj = list(eng.getObjects())[0] self.assertEqual(ObjectVersion.WAITING, obj.version) self.assertEqual(WorkflowStatus.HALTED, eng.status) self.assertEqual(0, BibWorkflowObjectLog.get( id_object=obj.id, log_type=logging.ERROR).count()) def test_halt_in_branch(self): """Test halt task when in conditionnal branch.""" from workflow.patterns import IF_ELSE from invenio.modules.workflows.registry import workflows from invenio.modules.workflows.api import start from invenio.modules.workflows.engine import WorkflowStatus from invenio.modules.workflows.models import (BibWorkflowObjectLog, ObjectVersion) always_true = lambda obj, eng: True halt_engine = lambda obj, eng: eng.halt("Test") class BranchTest(object): workflow = [ IF_ELSE(always_true, 
[halt_engine], [halt_engine]) ] workflows['branchtest'] = BranchTest data = [set(('somekey', 'somevalue'))] eng = start('branchtest', data, module_name="unit_tests") idx, obj = list(eng.getObjects())[0] self.assertEqual(ObjectVersion.WAITING, obj.version) self.assertEqual(WorkflowStatus.HALTED, eng.status) self.assertEqual(0, BibWorkflowObjectLog.get( id_object=obj.id, log_type=logging.ERROR).count()) def test_object_creation_complete(self): """ Test status of object before/after workflow. When created before calling API, with "high" test-data that will make the workflow complete. """ from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.engine import WorkflowStatus from invenio.modules.workflows.api import start test_object = BibWorkflowObject() test_object.set_data(20) test_object.save() self.assertEqual(ObjectVersion.INITIAL, test_object.version) self.assertEqual(None, test_object.id_parent) self.assertEqual(20, test_object.get_data()) engine = start('test_workflow', [test_object], module_name="unit_tests") self.assertEqual(38, test_object.get_data()) self.assertEqual(None, test_object.id_parent) self.assertEqual(WorkflowStatus.COMPLETED, engine.status) self.assertEqual(ObjectVersion.COMPLETED, test_object.version) def test_object_creation_halt(self): """Test status of object before/after workflow. When created before calling API, with "low" test-data that will make the workflow halt. 
""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start from invenio.modules.workflows.engine import WorkflowStatus test_object = BibWorkflowObject() test_object.set_data(2) test_object.save() self.assertEqual(ObjectVersion.INITIAL, test_object.version) self.assertEqual(None, test_object.id_parent) self.assertEqual(2, test_object.get_data()) engine = start('test_workflow', [test_object], module_name="unit_tests") self.assertEqual(2, test_object.get_data()) self.assertEqual(ObjectVersion.WAITING, test_object.version) self.assertEqual(WorkflowStatus.HALTED, engine.status) def test_workflow_engine_instantiation(self): """Check the proper init of the Workflow and BibWorkflowEngine.""" from invenio.modules.workflows.models import Workflow from invenio.modules.workflows.engine import BibWorkflowEngine from uuid import uuid1 as new_uuid test_workflow = Workflow(name='test_workflow', uuid=new_uuid(), id_user=0, module_name="Unknown", ) test_workflow_engine = BibWorkflowEngine(name=test_workflow.name, uuid=test_workflow.uuid) self.assertEqual(test_workflow.name, test_workflow_engine.name) def test_workflow_restarts(self): """Check if all is well when restarting a workflow several times.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start, continue_oid from invenio.modules.workflows.engine import WorkflowStatus test_object = BibWorkflowObject() random.seed(time.time()) tries = 15 test_object.set_data(tries) test_object.save() engine = start('test_workflow_hardcore', [test_object], module_name="unit_tests") for i in range(0, tries): self.assertEqual(engine.status, WorkflowStatus.HALTED) for my_object_b in engine.getObjects(): engine = continue_oid(my_object_b[1].id, "restart_task") self.assertEqual(0, test_object.get_data()) self.assertEqual(ObjectVersion.COMPLETED, test_object.version) 
self.assertEqual(WorkflowStatus.COMPLETED, engine.status) def test_workflow_object_creation(self): """Test to see if the right snapshots or object versions are created.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start initial_data = 22 final_data = 40 test_object = BibWorkflowObject() test_object.set_data(initial_data) test_object.save() workflow = start(workflow_name="test_workflow", data=[test_object], module_name="unit_tests") # Get parent object of the workflow we just ran initial_object = BibWorkflowObject.query.filter(BibWorkflowObject.id_parent == test_object.id).one() all_objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid ).order_by(BibWorkflowObject.id).all() # There should only be 2 objects (initial, final) self.assertEqual(2, len(all_objects)) self.assertEqual(test_object.id, initial_object.id_parent) self.assertEqual(ObjectVersion.INITIAL, initial_object.version) self.assertEqual(initial_data, initial_object.get_data()) self.assertEqual(final_data, test_object.get_data()) self.assertEqual(ObjectVersion.COMPLETED, test_object.version) def test_workflow_object_creation_simple(self): """Test to see if the right snapshots or object versions are created.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start initial_data = 22 final_data = 40 workflow = start(workflow_name="test_workflow", data=[initial_data], module_name="unit_tests") # Get parent object of the workflow we just ran initial_object = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid, BibWorkflowObject.id_parent == None).first() # noqa E711 test_object = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid, BibWorkflowObject.id_parent == initial_object.id).first() all_objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == 
workflow.uuid ).order_by(BibWorkflowObject.id).all() # There should only be 2 objects (initial, final) self.assertEqual(2, len(all_objects)) self.assertEqual(test_object.id_parent, initial_object.id) self.assertEqual(ObjectVersion.COMPLETED, initial_object.version) self.assertEqual(final_data, initial_object.get_data()) self.assertEqual(initial_data, test_object.get_data()) self.assertEqual(ObjectVersion.INITIAL, test_object.version) def test_workflow_complex_run(self): """Test running workflow with several data objects.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start self.test_data = [1, 20] final_data = [1, 38] workflow = start(workflow_name="test_workflow", data=self.test_data, module_name="unit_tests") # Get parent objects of the workflow we just ran objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid, BibWorkflowObject.id_parent == None # noqa E711 ).order_by(BibWorkflowObject.id).all() # Let's check that we found anything. 
# There should only be three objects self.assertEqual(2, len(objects)) all_objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid ).order_by(BibWorkflowObject.id).all() self.assertEqual(4, len(all_objects)) for obj in objects: # The child object should have the final or halted version self.assertTrue(obj.child_objects[0].version in (ObjectVersion.INITIAL, ObjectVersion.HALTED)) # Making sure the final data is correct self.assertTrue(obj.get_data() in final_data) self.assertTrue(obj.child_objects[0].get_data() in self.test_data) def test_workflow_marcxml(self): """Test runnning a record ingestion workflow with a action step.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.engine import WorkflowStatus from invenio.modules.workflows.api import start initial_data = self.recxml workflow = start(workflow_name="marcxml_workflow", data=[initial_data], module_name="unit_tests") # Get objects of the workflow we just ran objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid, BibWorkflowObject.id_parent == None # noqa E711 ).order_by(BibWorkflowObject.id).all() self._check_workflow_execution(objects, initial_data) all_objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid ).order_by(BibWorkflowObject.id).all() self.assertEqual(2, len(all_objects)) self.assertEqual(WorkflowStatus.HALTED, workflow.status) current = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == workflow.uuid, BibWorkflowObject.version == ObjectVersion.HALTED ).one() self.assertEqual(current.get_action(), "approval") def test_workflow_for_halted_object(self): """Test workflow with continuing a halted object.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start, continue_oid from invenio.modules.workflows.engine import WorkflowStatus current = 
BibWorkflowObject() current.set_data(self.recxml) current.save() workflow = start(workflow_name="marcxml_workflow", data=[current], module_name="unit_tests") self.assertEqual(WorkflowStatus.HALTED, workflow.status) self.assertEqual(ObjectVersion.HALTED, current.version) workflow = continue_oid(current.id, module_name="unit_tests") self.assertEqual(WorkflowStatus.COMPLETED, workflow.status) self.assertEqual(ObjectVersion.COMPLETED, current.version) def test_workflow_for_finished_object(self): """Test starting workflow with finished object given.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start from invenio.modules.workflows.engine import WorkflowStatus current = BibWorkflowObject() current.set_data(20) current.save() workflow = start(workflow_name="test_workflow", data=[current], module_name="unit_tests") self.assertEqual(WorkflowStatus.COMPLETED, workflow.status) self.assertEqual(ObjectVersion.COMPLETED, current.version) self.assertEqual(38, current.get_data()) previous = BibWorkflowObject.query.get(current.id) workflow_2 = start(workflow_name="test_workflow", data=[previous], module_name="unit_tests") self.assertEqual(WorkflowStatus.COMPLETED, workflow_2.status) self.assertEqual(ObjectVersion.COMPLETED, previous.version) self.assertEqual(56, previous.get_data()) def test_logging_for_workflow_objects_without_workflow(self): """Test run a virtual object out of a workflow for test purpose.""" from invenio.modules.workflows.models import (BibWorkflowObject, BibWorkflowObjectLog, ObjectVersion) initial_data = 20 obj_init = BibWorkflowObject(id_workflow=11, version=ObjectVersion.INITIAL) obj_init.set_data(initial_data) obj_init.save() err_msg = "This is an error message" info_msg = "This is an info message" obj_init.log.info(info_msg) obj_init.log.error("This is an error message") # FIXME: loglevels are simply overwritten somewhere in Celery # even if Celery is not being "used". 
# # This means loglevel.DEBUG is NOT working at the moment! # debug_msg = "This is a debug message" # obj_init.log.debug(debug_msg) obj_init.save() obj_test = BibWorkflowObjectLog.query.filter( BibWorkflowObjectLog.id_object == obj_init.id).all() messages_found = 0 for current_obj in obj_test: if current_obj.message == info_msg and messages_found == 0: messages_found += 1 elif current_obj.message == err_msg and messages_found == 1: messages_found += 1 self.assertEqual(2, messages_found) def test_workflow_for_running_object(self): """Test workflow with running object given and watch it fail.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start_by_oids from invenio.modules.workflows.errors import WorkflowObjectVersionError obj_running = BibWorkflowObject() obj_running.set_data(1234) obj_running.save(version=ObjectVersion.RUNNING) try: start_by_oids('test_workflow', [obj_running.id], module_name="unit_tests") except Exception as e: self.assertTrue(isinstance(e, WorkflowObjectVersionError)) obj_running.delete(e.id_object) obj_running.delete(obj_running) obj_running = BibWorkflowObject() obj_running.set_data(1234) obj_running.save(version=ObjectVersion.RUNNING) try: start_by_oids('test_workflow', [obj_running.id], module_name="unit_tests") except Exception as e: self.assertTrue(isinstance(e, WorkflowObjectVersionError)) obj_running.delete(e.id_object) obj_running.delete(obj_running) obj_running = BibWorkflowObject() obj_running.set_data(1234) obj_running.save(version=5) try: start_by_oids('test_workflow', [obj_running.id], module_name="unit_tests") except Exception as e: self.assertTrue(isinstance(e, WorkflowObjectVersionError)) obj_running.delete(e.id_object) obj_running.delete(obj_running) def test_continue_execution_for_object(self): """Test continuing execution of workflow for object given.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from 
invenio.modules.workflows.api import start, continue_oid initial_data = 1 # testing restarting from previous task init_workflow = start("test_workflow", data=[initial_data], module_name="unit_tests") obj_halted = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == init_workflow.uuid, BibWorkflowObject.version == ObjectVersion.WAITING ).first() self.assertTrue(obj_halted) self.assertEqual(1, obj_halted.get_data()) # Try to restart, we should halt again actually. continue_oid(oid=obj_halted.id, start_point="restart_task", module_name="unit_tests") self.assertEqual(1, obj_halted.get_data()) self.assertEqual(ObjectVersion.WAITING, obj_halted.version) # We skip to next part, this should work continue_oid(oid=obj_halted.id, module_name="unit_tests") self.assertEqual(19, obj_halted.get_data()) self.assertEqual(ObjectVersion.COMPLETED, obj_halted.version) # Let's do that last task again, shall we? continue_oid(oid=obj_halted.id, start_point="restart_prev", module_name="unit_tests") self.assertEqual(37, obj_halted.get_data()) self.assertEqual(ObjectVersion.COMPLETED, obj_halted.version) def test_restart_workflow(self): """Test restarting workflow for given workflow id.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import start, start_by_wid initial_data = 1 init_workflow = start(workflow_name="test_workflow", data=[initial_data], module_name="unit_tests") init_objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == init_workflow.uuid ).order_by(BibWorkflowObject.id).all() self.assertEqual(2, len(init_objects)) restarted_workflow = start_by_wid(wid=init_workflow.uuid, module_name="unit_tests") # We expect the same workflow to be re-started self.assertTrue(init_workflow.uuid == restarted_workflow.uuid) restarted_objects = BibWorkflowObject.query.filter( BibWorkflowObject.id_workflow == restarted_workflow.uuid ).order_by(BibWorkflowObject.id).all() # This time we should only 
have one more initial object self.assertEqual(2, len(restarted_objects)) # Last object will be INITIAL self.assertEqual(ObjectVersion.INITIAL, restarted_objects[1].version) self.assertEqual(restarted_objects[1].id_parent, restarted_objects[0].id) def test_restart_failed_workflow(self): """Test restarting workflow for given workflow id.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.engine import WorkflowStatus from invenio.modules.workflows.api import start, start_by_oids from invenio.modules.workflows.errors import WorkflowError initial_data = BibWorkflowObject.create_object() initial_data.set_data(1) initial_data.save() self.assertRaises( WorkflowError, start, workflow_name="test_workflow_error", data=[initial_data], module_name="unit_tests" ) self.assertEqual(initial_data.version, ObjectVersion.ERROR) restarted_workflow = start_by_oids("test_workflow", oids=[initial_data.id], module_name="unit_tests") self.assertEqual(initial_data.version, ObjectVersion.WAITING) self.assertEqual(restarted_workflow.status, WorkflowStatus.HALTED) def _check_workflow_execution(self, objects, initial_data): """Test correct workflow execution.""" from invenio.modules.workflows.models import ObjectVersion # Let's check that we found anything. 
There should only be one object self.assertEqual(len(objects), 1) parent_object = objects[0] # The object should be the inital version self.assertEqual(ObjectVersion.HALTED, parent_object.version) # The object should have the inital data self.assertEqual(initial_data, objects[0].child_objects[0].get_data()) # Fetch final object which should exist final_object = objects[0].child_objects[0] self.assertTrue(final_object) class TestWorkflowTasks(WorkflowTasksTestCase): """Test meant for testing the the generic tasks available.""" def setUp(self): """Setup tests.""" self.create_registries() def tearDown(self): """Clean up tests.""" from invenio.modules.workflows.models import Workflow Workflow.get(Workflow.module_name == "unit_tests").delete() self.cleanup_registries() def test_logic_tasks_restart(self): """Test that the logic tasks work correctly when restarted.""" from invenio.modules.workflows.models import BibWorkflowObject from invenio.modules.workflows.api import (start, start_by_wid) test_object = BibWorkflowObject() test_object.set_data(0) test_object.save() # Initial run workflow = start('test_workflow_logic', [test_object], module_name="unit_tests") self.assertEqual(5, test_object.get_data()) self.assertEqual("lt9", test_object.get_extra_data()["test"]) # Reset before re-starting (reset Iterator data) workflow.reset_extra_data() workflow = start_by_wid(workflow.uuid) self.assertEqual(5, test_object.get_data()) self.assertEqual("lt9", test_object.get_extra_data()["test"]) def test_logic_tasks_continue(self): """Test that the logic tasks work correctly when continuing.""" from invenio.modules.workflows.models import (BibWorkflowObject, ObjectVersion) from invenio.modules.workflows.api import (start, continue_oid) from invenio.modules.workflows.engine import WorkflowStatus test_object = BibWorkflowObject() test_object.set_data(0) test_object.save() workflow = start('test_workflow_logic', [test_object], module_name="unit_tests") self.assertEqual(5, 
test_object.get_data()) self.assertEqual("lt9", test_object.get_extra_data()["test"]) workflow = continue_oid(test_object.id) self.assertEqual(6, test_object.get_data()) self.assertEqual("lt9", test_object.get_extra_data()["test"]) workflow = continue_oid(test_object.id) self.assertEqual(9, test_object.get_data()) self.assertEqual("gte9", test_object.get_extra_data()["test"]) workflow = continue_oid(test_object.id) self.assertEqual(15, test_object.get_data()) self.assertEqual("gte9", test_object.get_extra_data()["test"]) workflow = continue_oid(test_object.id) self.assertEqual(ObjectVersion.COMPLETED, test_object.version) self.assertEqual(WorkflowStatus.COMPLETED, workflow.status) def test_workflow_without_workflow_object_saved(self): """Test that the logic tasks work correctly.""" from invenio.modules.workflows.models import BibWorkflowObject from invenio.modules.workflows.api import start, start_by_wid test_object = BibWorkflowObject() test_object.set_data(0) test_object.save() workflow = start('test_workflow_logic', [test_object], module_name="unit_tests") self.assertEqual(5, test_object.get_data()) self.assertEqual("lt9", test_object.get_extra_data()["test"]) start_by_wid(workflow.uuid) test_object.delete(test_object.id) def test_workflow_task_results(self): """Test the setting and getting of task results.""" from invenio.modules.workflows.models import BibWorkflowObject test_object = BibWorkflowObject() test_object.save() # Saving is needed to instantiate default values test_object.add_task_result("test", {"data": "testing"}) results = test_object.get_tasks_results() self.assertEqual(len(results.get("test")), 1) result_item = results.get("test")[0] self.assertEqual({"data": "testing"}, result_item.get("result")) self.assertEqual("workflows/results/default.html", result_item.get("template")) self.assertEqual("test", result_item.get("name")) TEST_SUITE = make_test_suite(WorkflowTasksTestAPI, TestWorkflowTasks) if __name__ == "__main__": run_test_suite(TEST_SUITE)
kasioumis/invenio
invenio/modules/workflows/testsuite/test_workflows.py
Python
gpl-2.0
31,935
[ "Galaxy" ]
b6956f4cd0f835ce603181f9b524cbc30152fcde318cb35c64f22175f76847d0
""" Helper functions for accessing Paraview functionality .. moduleauthor:: Zenotech Ltd """ from paraview.simple import * # from paraview.vtk.util import numpy_support try: from paraview.vtk.dataset_adapter import numpyTovtkDataArray from paraview.vtk.dataset_adapter import Table from paraview.vtk.dataset_adapter import PolyData from paraview.vtk.dataset_adapter import DataSetAttributes from paraview.vtk.dataset_adapter import DataSet from paraview.vtk.dataset_adapter import CompositeDataSet from paraview.vtk.dataset_adapter import PointSet except: from paraview.vtk.numpy_interface.dataset_adapter import numpyTovtkDataArray from paraview.vtk.numpy_interface.dataset_adapter import Table from paraview.vtk.numpy_interface.dataset_adapter import PolyData from paraview.vtk.numpy_interface.dataset_adapter import DataSetAttributes from paraview.vtk.numpy_interface.dataset_adapter import DataSet from paraview.vtk.numpy_interface.dataset_adapter import CompositeDataSet from paraview.vtk.numpy_interface.dataset_adapter import PointSet import pylab as pl from zutil import rotate_vector import json from zutil import mag import math import time def sum_and_zone_filter_array(input, array_name, ignore_zone, filter=None): sum = [0.0, 0.0, 0.0] p = input.GetCellData().GetArray(array_name) z = input.GetCellData().GetArray("zone") numCells = input.GetNumberOfCells() for x in range(numCells): if len(ignore_zone) == 0: v = p.GetTuple(x) for i in range(0, 3): sum[i] += v[i] else: zone = z.GetValue(x) if zone not in ignore_zone: v = p.GetTuple(x) if filter is None or filter.test(input, x): # print 'Zone: %i'%(zone) for i in range(0, 3): sum[i] += v[i] return sum def sum_and_zone_filter(input, array_name, ignore_zone, filter=None): sum = [0.0, 0.0, 0.0] if input.IsA("vtkMultiBlockDataSet"): iter = input.NewIterator() iter.UnRegister(None) iter.InitTraversal() while not iter.IsDoneWithTraversal(): cur_input = iter.GetCurrentDataObject() v = sum_and_zone_filter_array(cur_input, array_name, 
ignore_zone, filter) for i in range(0, 3): sum[i] += v[i] iter.GoToNextItem() else: sum = sum_and_zone_filter_array(input, array_name, ignore_zone, filter) return sum class GeomFilterLT: def __init__(self, val, idx): # self.val = val self.idx = idx def test(self, input, x): centre = input.GetCellData().GetArray("centre").GetTuple(x) if centre[self.idx] < self.val: return True else: return False class GeomFilterGT: def __init__(self, val, idx): # self.val = val self.idx = idx def test(self, input, x): centre = input.GetCellData().GetArray("centre").GetTuple(x) if centre[self.idx] >= self.val: return True else: return False def calc_force_from_file(file_name, ignore_zone, half_model=False, filter=None, **kwargs): """ Calculates the pressure and friction force This function requires that the VTK file contains three cell data arrays called pressureforce, frictionforce and zone Args: file_name (str): the VTK file name including path ignore_zone (list): List of zones to be ignored Kwargs: half_nodel (bool): Does the data represent only half of the model filter (function): Returns: float, float. 
pressure force and friction force """ wall = PVDReader(FileName=file_name) wall.UpdatePipeline() return calc_force(wall, ignore_zone, half_model, filter, kwargs) def calc_force_wall(file_root, ignore_zone, half_model=False, filter=None, **kwargs): wall = PVDReader(FileName=file_root+'_wall.pvd') wall.UpdatePipeline() return calc_force(wall, ignore_zone, half_model, filter, **kwargs) def calc_force(surface_data, ignore_zone, half_model=False, filter=None, **kwargs): alpha = 0.0 if 'alpha' in kwargs: alpha = kwargs['alpha'] beta = 0.0 if 'beta' in kwargs: beta = kwargs['beta'] sum_client = servermanager.Fetch(surface_data) pforce = sum_and_zone_filter(sum_client, "pressureforce", ignore_zone, filter) fforce = sum_and_zone_filter(sum_client, "frictionforce", ignore_zone, filter) pforce = rotate_vector(pforce, alpha, beta) fforce = rotate_vector(fforce, alpha, beta) if half_model: for i in range(0, 3): pforce[i] *= 2.0 fforce[i] *= 2.0 return pforce, fforce def get_span(wall): """ Returns the min and max y ordinate Args: wall (vtkMultiBlockDataSet): The input surface Returns: (float,float). Min y, Max y """ Calculator1 = Calculator(Input=wall) Calculator1.AttributeMode = 'Point Data' Calculator1.Function = 'coords.jHat' Calculator1.ResultArrayName = 'ypos' Calculator1.UpdatePipeline() ymin = MinMax(Input=Calculator1) ymin.Operation = "MIN" ymin.UpdatePipeline() ymin_client = servermanager.Fetch(ymin) min_pos = ymin_client.GetPointData().GetArray("ypos").GetValue(0) ymax = MinMax(Input=Calculator1) ymax.Operation = "MAX" ymax.UpdatePipeline() ymax_client = servermanager.Fetch(ymax) max_pos = ymax_client.GetPointData().GetArray("ypos").GetValue(0) Delete(ymin) Delete(ymax) Delete(Calculator1) return [min_pos, max_pos] def get_chord(slice, rotate_geometry=[0.0, 0.0, 0.0]): """ Returns the min and max x ordinate Args: wall (vtkMultiBlockDataSet): The input surface Returns: (float,float). 
Min x, Max x """ transform = Transform(Input=slice, Transform="Transform") transform.Transform.Scale = [1.0, 1.0, 1.0] transform.Transform.Translate = [0.0, 0.0, 0.0] transform.Transform.Rotate = rotate_geometry transform.UpdatePipeline() Calculator1 = Calculator(Input=transform) Calculator1.AttributeMode = 'Point Data' Calculator1.Function = 'coords.iHat' Calculator1.ResultArrayName = 'xpos' Calculator1.UpdatePipeline() xmin = MinMax(Input=Calculator1) xmin.Operation = "MIN" xmin.UpdatePipeline() xmin_client = servermanager.Fetch(xmin) min_pos = xmin_client.GetPointData().GetArray("xpos").GetValue(0) xmax = MinMax(Input=Calculator1) xmax.Operation = "MAX" xmax.UpdatePipeline() xmax_client = servermanager.Fetch(xmax) max_pos = xmax_client.GetPointData().GetArray("xpos").GetValue(0) Delete(xmin) Delete(xmax) Delete(Calculator1) Delete(transform) return [min_pos, max_pos] def get_chord_spanwise(slice): Calculator1 = Calculator(Input=slice) Calculator1.AttributeMode = 'Point Data' Calculator1.Function = 'coords.jHat' Calculator1.ResultArrayName = 'ypos' Calculator1.UpdatePipeline() ymin = MinMax(Input=Calculator1) ymin.Operation = "MIN" ymin.UpdatePipeline() ymin_client = servermanager.Fetch(ymin) min_pos = ymin_client.GetPointData().GetArray("ypos").GetValue(0) ymax = MinMax(Input=Calculator1) ymax.Operation = "MAX" ymax.UpdatePipeline() ymax_client = servermanager.Fetch(ymax) max_pos = ymax_client.GetPointData().GetArray("ypos").GetValue(0) Delete(ymin) Delete(ymax) Delete(Calculator1) return [min_pos, max_pos] def get_monitor_data(file, monitor_name, var_name): """ Return the _report file data corresponding to a monitor point and variable name """ monitor = CSVReader(FileName=[file]) monitor.HaveHeaders = 1 monitor.MergeConsecutiveDelimiters = 1 monitor.UseStringDelimiter = 0 monitor.DetectNumericColumns = 1 monitor.FieldDelimiterCharacters = ' ' monitor.UpdatePipeline() monitor_client = servermanager.Fetch(monitor) table = Table(monitor_client) data = 
table.RowData names = data.keys() num_var = len(names)-2 if (str(monitor_name) + "_" + str(var_name) in names): index = names.index(str(monitor_name) + "_" + str(var_name)) return (data[names[0]],data[names[index]]) else: print 'POST.PY: MONITOR POINT: ' + str(monitor_name) + "_" + str(var_name) + ' NOT FOUND' def residual_plot(file): """ Plot the _report file """ l2norm = CSVReader(FileName=[file]) l2norm.HaveHeaders = 1 l2norm.MergeConsecutiveDelimiters = 1 l2norm.UseStringDelimiter = 0 l2norm.DetectNumericColumns = 1 l2norm.FieldDelimiterCharacters = ' ' l2norm.UpdatePipeline() l2norm_client = servermanager.Fetch(l2norm) table = Table(l2norm_client) data = table.RowData names = data.keys() num_var = len(names)-2 num_rows = ((num_var-1)/4)+1 fig = pl.figure(figsize=(40, 10*num_rows), dpi=100, facecolor='w', edgecolor='k') fig.suptitle(file, fontsize=40, fontweight='bold') for i in range(1, num_var+1): var_name = names[i] ax = fig.add_subplot(num_rows, 4, i) if 'rho' in var_name: ax.set_yscale('log') ax.set_ylabel('l2norm '+var_name, multialignment='center') else: ax.set_ylabel(var_name, multialignment='center') ax.grid(True) ax.set_xlabel('Cycles') ax.plot(data[names[0]], data[names[i]], color='r', label=names[i]) def for_each(surface, func, **kwargs): if surface.IsA("vtkMultiBlockDataSet"): iter = surface.NewIterator() iter.UnRegister(None) iter.InitTraversal() while not iter.IsDoneWithTraversal(): cur_input = iter.GetCurrentDataObject() # numCells = cur_input.GetNumberOfCells() numPts = cur_input.GetNumberOfPoints() if numPts > 0: calc = DataSet(cur_input) pts = PointSet(cur_input) func(calc, pts, **kwargs) iter.GoToNextItem() else: calc = DataSet(surface) pts = PointSet(surface) func(calc, pts, **kwargs) def cp_profile_wall_from_file(file_root, slice_normal, slice_origin, **kwargs): wall = PVDReader(FileName=file_root+'_wall.pvd') clean = CleantoGrid(Input=wall) clean.UpdatePipeline() merged = MergeBlocks(Input=clean) merged.UpdatePipeline() return 
cp_profile(merged, slice_normal, slice_origin, **kwargs) def cp_profile_wall_from_file_span(file_root, slice_normal, slice_origin, **kwargs): wall = PVDReader(FileName=file_root+'_wall.pvd') clean = CleantoGrid(Input=wall) clean.UpdatePipeline() merged = MergeBlocks(Input=clean) merged.UpdatePipeline() return cp_profile_span(merged, slice_normal, slice_origin, **kwargs) def cp_profile(surface, slice_normal, slice_origin, **kwargs): alpha = 0.0 if 'alpha' in kwargs: alpha = kwargs['alpha'] beta = 0.0 if 'beta' in kwargs: beta = kwargs['beta'] time_average = False if 'time_average' in kwargs: time_average = kwargs['time_average'] rotate_geometry = [0.0, 0.0, 0.0] if 'rotate_geometry' in kwargs: rotate_geometry = kwargs['rotate_geometry'] point_data = CellDatatoPointData(Input=surface) point_data.PassCellData = 1 slice = Slice(Input=point_data, SliceType="Plane") slice.SliceType.Normal = slice_normal slice.SliceType.Origin = slice_origin slice.UpdatePipeline() if time_average: temporal = TemporalStatistics(Input=slice) temporal.ComputeMaximum = 0 temporal.ComputeStandardDeviation = 0 temporal.ComputeMinimum = 0 temporal.UpdatePipeline() slice = temporal offset = get_chord(slice, rotate_geometry) transform = Transform(Input=slice, Transform="Transform") transform.Transform.Scale = [1.0, 1.0, 1.0] transform.Transform.Translate = [0.0, 0.0, 0.0] transform.Transform.Rotate = rotate_geometry transform.UpdatePipeline() chord_calc = Calculator(Input=transform) chord_calc.AttributeMode = 'Point Data' chord_calc.Function = ('(coords.iHat - ' + str(offset[0]) + ')/' + str(offset[1]-offset[0])) chord_calc.ResultArrayName = 'chord' # Attempt to calculate forces pforce = [0.0, 0.0, 0.0] fforce = [0.0, 0.0, 0.0] sum = MinMax(Input=slice) sum.Operation = "SUM" sum.UpdatePipeline() sum_client = servermanager.Fetch(sum) if (sum_client.GetCellData().GetArray("pressureforce") and sum_client.GetCellData().GetArray("frictionforce")): pforce = 
sum_client.GetCellData().GetArray("pressureforce").GetTuple(0) fforce = sum_client.GetCellData().GetArray("frictionforce").GetTuple(0) pforce = rotate_vector(pforce, alpha, beta) fforce = rotate_vector(fforce, alpha, beta) """ # Add sectional force integration sorted_line = PlotOnSortedLines(Input=chord_calc) sorted_line.UpdatePipeline() sorted_line = servermanager.Fetch(sorted_line) cp_array = sorted_line.GetCellData().GetArray("cp") for i in range(0,len(cp_array)): sorted_line.GetPointData().GetArray("X") pass """ if 'func' in kwargs: sorted_line = PlotOnSortedLines(Input=chord_calc) sorted_line.UpdatePipeline() extract_client = servermanager.Fetch(sorted_line) for_each(extract_client, **kwargs) return {'pressure force': pforce, 'friction force': fforce} def cp_profile_span(surface, slice_normal, slice_origin, **kwargs): alpha = 0.0 if 'alpha' in kwargs: alpha = kwargs['alpha'] beta = 0.0 if 'beta' in kwargs: beta = kwargs['beta'] point_data = CellDatatoPointData(Input=surface) point_data.PassCellData = 1 clip = Clip(Input=point_data, ClipType="Plane") clip.ClipType.Normal = [0.0, 1.0, 0.0] clip.ClipType.Origin = [0.0, 0.0, 0.0] clip.UpdatePipeline() slice = Slice(Input=clip, SliceType="Plane") slice.SliceType.Normal = slice_normal slice.SliceType.Origin = slice_origin slice.UpdatePipeline() offset = get_chord_spanwise(slice) # define the cuts and make sure the is the one one you want # make the chord_calc = Calculator(Input=slice) chord_calc.AttributeMode = 'Point Data' chord_calc.Function = ('(coords.jHat - ' + str(offset[0]) + ')/' + str(offset[1]-offset[0])) chord_calc.ResultArrayName = 'chord' sum = MinMax(Input=slice) sum.Operation = "SUM" sum.UpdatePipeline() sum_client = servermanager.Fetch(sum) pforce = sum_client.GetCellData().GetArray("pressureforce").GetTuple(0) fforce = sum_client.GetCellData().GetArray("frictionforce").GetTuple(0) pforce = rotate_vector(pforce, alpha, beta) fforce = rotate_vector(fforce, alpha, beta) if 'func' in kwargs: sorted_line 
= PlotOnSortedLines(Input=chord_calc) sorted_line.UpdatePipeline() extract_client = servermanager.Fetch(sorted_line) for_each(extract_client, **kwargs) return {'pressure force': pforce, 'friction force': fforce} def cf_profile(surface, slice_normal, slice_origin, **kwargs): alpha = 0.0 if 'alpha' in kwargs: alpha = kwargs['alpha'] beta = 0.0 if 'beta' in kwargs: beta = kwargs['beta'] point_data = CellDatatoPointData(Input=surface) point_data.PassCellData = 1 slice = Slice(Input=point_data, SliceType="Plane") slice.SliceType.Normal = slice_normal slice.SliceType.Origin = slice_origin slice.UpdatePipeline() offset = get_chord(slice) chord_calc = Calculator(Input=slice) chord_calc.AttributeMode = 'Point Data' chord_calc.Function = ('(coords.iHat - ' + str(offset[0]) + ')/' + str(offset[1]-offset[0])) chord_calc.ResultArrayName = 'chord' cf_calc = Calculator(Input=chord_calc) cf_calc.AttributeMode = 'Point Data' cf_calc.Function = 'mag(cf)' cf_calc.ResultArrayName = 'cfmag' sum = MinMax(Input=slice) sum.Operation = "SUM" sum.UpdatePipeline() sum_client = servermanager.Fetch(sum) pforce = sum_client.GetCellData().GetArray("pressureforce").GetTuple(0) fforce = sum_client.GetCellData().GetArray("frictionforce").GetTuple(0) pforce = rotate_vector(pforce, alpha, beta) fforce = rotate_vector(fforce, alpha, beta) if 'func' in kwargs: sorted_line = PlotOnSortedLines(Input=cf_calc) sorted_line.UpdatePipeline() extract_client = servermanager.Fetch(sorted_line) for_each(extract_client, **kwargs) return {'pressure force': pforce, 'friction force': fforce} import csv def get_csv_data(filename, header=False, remote=False, delim=' '): """ Get csv data """ if remote: theory = CSVReader(FileName=[filename]) theory.HaveHeaders = 0 if header: theory.HaveHeaders = 1 theory.MergeConsecutiveDelimiters = 1 theory.UseStringDelimiter = 0 theory.DetectNumericColumns = 1 theory.FieldDelimiterCharacters = delim theory.UpdatePipeline() theory_client = servermanager.Fetch(theory) table = 
Table(theory_client) data = table.RowData else: import pandas as pd if not header: data = pd.read_csv(filename, sep=delim, header=None) else: data = pd.read_csv(filename, sep=delim) return data def get_fw_csv_data(filename, widths, header=False, remote=False, **kwargs): if remote: theory = CSVReader(FileName=[filename]) theory.HaveHeaders = 0 theory.MergeConsecutiveDelimiters = 1 theory.UseStringDelimiter = 0 theory.DetectNumericColumns = 1 theory.FieldDelimiterCharacters = ' ' theory.UpdatePipeline() theory_client = servermanager.Fetch(theory) table = Table(theory_client) data = table.RowData else: import pandas as pd if not header: data = pd.read_fwf(filename, sep=' ', header=None, widths=widths, **kwargs) else: data = pd.read_fwf(filename, sep=' ', width=widths, **kwargs) return data def screenshot(wall): # position camera view = GetActiveView() if not view: # When using the ParaView UI, the View will be present, not otherwise. view = CreateRenderView() view.CameraViewUp = [0, 0, 1] view.CameraFocalPoint = [0, 0, 0] view.CameraViewAngle = 45 view.CameraPosition = [5, 0, 0] # draw the object Show() # set the background color view.Background = [1, 1, 1] # white # set image size view.ViewSize = [200, 300] # [width, height] dp = GetDisplayProperties() # set point color dp.AmbientColor = [1, 0, 0] # red # set surface color dp.DiffuseColor = [0, 1, 0] # blue # set point size dp.PointSize = 2 # set representation dp.Representation = "Surface" Render() # save screenshot WriteImage("test.png") def sum_array(input, array_name): sum = [0.0, 0.0, 0.0] p = input.GetCellData().GetArray(array_name) numCells = input.GetNumberOfCells() for x in range(numCells): v = p.GetTuple(x) for i in range(0, 3): sum[i] += v[i] return sum from fabric.api import (env, run, cd, get, hide, settings, remote_tunnel, show, shell_env) from fabric.tasks import execute import logging log = logging.getLogger("paramiko.transport") sh = logging.StreamHandler() sh.setLevel(logging.DEBUG) 
log.addHandler(sh) import sys import multiprocessing as mp from multiprocessing import Process, Value process_id = None use_multiprocess = True # Uncomment for output logging # logger = mp.get_logger() # logger.addHandler(logging.StreamHandler(sys.stdout)) # logger.setLevel(mp.SUBDEBUG) def pvserver(remote_dir, paraview_cmd, paraview_port, paraview_remote_port): with show('debug'), remote_tunnel(int(paraview_remote_port),local_port=int(paraview_port)), cd(remote_dir): # with cd(remote_dir): if not use_multiprocess: run('sleep 2;'+paraview_cmd+'</dev/null &>/dev/null&', pty=False) else: # # run('sleep 2;'+paraview_cmd+'&>/dev/null',pty=False) run('sleep 2;'+paraview_cmd) # , pty=False) # run(paraview_cmd+'</dev/null &>/dev/null',pty=False) # run('screen -d -m "yes"') # ssh asrc2 "(ls</dev/null &>/dev/null&) 2>&1; true" 2>/dev/null || echo SSH connection or remote command failed - either of them returned non-zero exit code $? def pvcluster(remote_dir, paraview_home, paraview_args, paraview_port, paraview_remote_port, job_dict): with show('debug'), remote_tunnel(int(paraview_remote_port), local_port=int(paraview_port)): with shell_env(PARAVIEW_HOME=paraview_home, PARAVIEW_ARGS=paraview_args): run('echo $PARAVIEW_HOME') run('echo $PARAVIEW_ARGS') run('mkdir -p '+remote_dir) with cd(remote_dir): cmd_line = 'mycluster --create pvserver.job --jobname=pvserver' cmd_line += ' --jobqueue ' + job_dict['job_queue'] cmd_line += ' --ntasks ' + job_dict['job_ntasks'] cmd_line += ' --taskpernode ' + job_dict['job_ntaskpernode'] if 'vizstack' in paraview_args: cmd_line += ' --script mycluster-viz-paraview.bsh' else: cmd_line += ' --script mycluster-paraview.bsh' cmd_line += ' --project ' + job_dict['job_project'] run(cmd_line) run('chmod u+rx pvserver.job') run('mycluster --immediate --submit pvserver.job') def port_test(rport, lport): # Run a test with hide('everything'), remote_tunnel(int(rport), local_port=int(lport)): run('cd') def get_case_file(): with cd(remote_dir): 
get(case_name+'.py', '%(path)s') def cat_case_file(remote_dir, case_name): with cd(remote_dir): with hide('output', 'running', 'warnings'), settings(warn_only=True): # cmd = 'cat '+case_name+'.py' import StringIO contents = StringIO.StringIO() get(case_name+'.py', contents) # operate on 'contents' like a file object here, e.g. 'print return contents.getvalue() def cat_status_file(remote_dir, case_name): with cd(remote_dir): with hide('output', 'running', 'warnings'), settings(warn_only=True): # cmd = 'cat '+case_name+'_status.txt' import StringIO contents = StringIO.StringIO() result = get(case_name+'_status.txt', contents) if result.succeeded: # operate on 'contents' like a file object here, e.g. 'print return contents.getvalue() else: return None def run_uname(with_tunnel): with hide('everything'): run('uname -a') def test_ssh(status, **kwargs): global data_host _remote_host = data_host if 'data_host' in kwargs: _remote_host = kwargs['data_host'] try: env.use_ssh_config = True execute(run_uname, False, hosts=[_remote_host]) except: status.value = 0 return False return True def test_ssh_mp(**kwargs): # print 'Starting test ssh' status = Value('i', 1) process_id = mp.Process(target=test_ssh, args=(status,), kwargs=kwargs) process_id.start() process_id.join() if status.value == 0: return False return True def test_remote_tunnel(**kwargs): global data_host _remote_host = data_host if 'data_host' in kwargs: _remote_host = kwargs['data_host'] try: env.use_ssh_config = True execute(run_uname, True, hosts=[_remote_host]) except: return False return True def get_remote_port(**kwargs): global data_host, paraview_remote_port, paraview_port _remote_host = data_host if 'data_host' in kwargs: _remote_host = kwargs['data_host'] paraview_port = '11111' if 'paraview_port' in kwargs: paraview_port = kwargs['paraview_port'] paraview_remote_port = '11113' if 'paraview_remote_port' in kwargs: paraview_remote_port = kwargs['paraview_remote_port'] else: # Attempt to find an unused 
remote port print 'Attempting to find unused port' for p in range(12000, 13000): tp = Value('i', p) process_id = mp.Process(target=test_remote_port, args=(port_test, tp, paraview_port, _remote_host)) process_id.start() process_id.join() # print tp.value if tp.value != 0: break print 'Selected Port: '+str(p) paraview_remote_port = p def test_remote_port(port_test, port, paraview_port, remote_host): try: env.use_ssh_config = True execute(port_test, port.value, paraview_port, hosts=[remote_host]) return True except: port.value = 0 return False def pvserver_start(remote_host, remote_dir, paraview_cmd): if paraview_cmd is not None: env.use_ssh_config = True execute(pvserver, remote_dir, paraview_cmd, hosts=[remote_host]) def pvserver_connect(**kwargs): """ Be careful when adding to this function fabric execute calls do not play well with multiprocessing. Do not mix direct fabric execute call and mp based fabric execute calls """ global remote_data, data_dir, data_host, remote_server_auto global paraview_cmd, process_id, paraview_port, paraview_remote_port global process_id _paraview_cmd = paraview_cmd if 'paraview_cmd' in kwargs: _paraview_cmd = kwargs['paraview_cmd'] if '-sp' in _paraview_cmd or '--client-host' in _paraview_cmd: print('pvserver_process: Please only provide pvserver' 'executable path and name without arguments') print 'e.g. 
mpiexec -n 1 /path_to_pvserver/bin/pvserver' return False # Add Check for passwordless ssh print 'Testing passwordless ssh access' if not test_ssh_mp(**kwargs): print 'ERROR: Passwordless ssh access to data host failed' return False print '-> Passed' # Add check for paraview version # Find free remote port get_remote_port(**kwargs) paraview_port = '11111' if 'paraview_port' in kwargs: paraview_port = kwargs['paraview_port'] if not use_multiprocess: pvserver_process(**kwargs) else: print 'Starting pvserver connect' process_id = mp.Process(target=pvserver_process, kwargs=kwargs) process_id.start() # process_id.join() # time.sleep(6) ReverseConnect(paraview_port) return True def pvcluster_process(**kwargs): pvserver_process(**kwargs) def pvserver_process(**kwargs): global remote_data, data_dir, data_host, remote_server_auto global paraview_cmd, paraview_home, paraview_port, paraview_remote_port print 'Starting pvserver process' _remote_dir = data_dir if 'data_dir' in kwargs: _remote_dir = kwargs['data_dir'] _paraview_cmd = paraview_cmd if 'paraview_cmd' in kwargs: _paraview_cmd = kwargs['paraview_cmd'] _paraview_home = paraview_home if 'paraview_home' in kwargs: _paraview_home = kwargs['paraview_home'] paraview_port = '11111' if 'paraview_port' in kwargs: paraview_port = kwargs['paraview_port'] """ _job_ntasks = 1 if 'job_ntasks' in kwargs: _job_ntasks = kwargs['job_ntasks'] """ _remote_host = data_host if 'data_host' in kwargs: _remote_host = kwargs['data_host'] # This global variable may have already been set so check if 'paraview_remote_port' not in globals(): paraview_remote_port = '11113' if 'paraview_remote_port' in kwargs: paraview_remote_port = kwargs['paraview_remote_port'] else: # Attempt to find an unused remote port print 'Attempting to find unused port' for p in range(12000, 13000): try: env.use_ssh_config = True execute(port_test, p, paraview_port, hosts=[_remote_host]) break except: pass print 'Selected Port: '+str(p) paraview_remote_port = p if 
'job_queue' in kwargs: # Submit job remote_hostname = _remote_host[_remote_host.find('@')+1:] if 'vizstack' in kwargs: paraview_args = ('/opt/vizstack/bin/viz-paraview -r ' + str(kwargs['job_ntasks']) + ' -c ' + remote_hostname + ' -p ' + str(paraview_remote_port)) else: paraview_args = (' -rc --client-host=' + remote_hostname + ' -sp=' + str(paraview_remote_port)) print paraview_args job_dict = { 'job_queue': kwargs['job_queue'], 'job_ntasks': kwargs['job_ntasks'], 'job_ntaskpernode': kwargs['job_ntaskpernode'], 'job_project': kwargs['job_project'], } if _paraview_home is not None: env.use_ssh_config = True execute(pvcluster, _remote_dir, _paraview_home, paraview_args, paraview_port, paraview_remote_port, job_dict, hosts=[_remote_host]) else: # Run Paraview if '-sp' in _paraview_cmd or '--client-host' in _paraview_cmd: print ('pvserver_process: Please only provide pvserver' 'executable path and name without arguments') print 'e.g. mpiexec -n 1 /path_to_pvserver/bin/pvserver' return False if 'vizstack' in kwargs: _paraview_cmd = (_paraview_cmd + ' -c localhost ' + ' -p ' + str(paraview_remote_port)) else: _paraview_cmd = (_paraview_cmd + ' -rc --client-host=localhost -sp=' + str(paraview_remote_port)) if _paraview_cmd is not None: env.use_ssh_config = True execute(pvserver, _remote_dir, _paraview_cmd, paraview_port, paraview_remote_port, hosts=[_remote_host]) def pvserver_disconnect(): Disconnect() if process_id: process_id.terminate() def get_case_parameters(case_name, **kwargs): global remote_data, data_dir, data_host, remote_server_auto, paraview_cmd _remote_dir = data_dir if 'data_dir' in kwargs: _remote_dir = kwargs['data_dir'] _remote_host = data_host if 'data_host' in kwargs: _remote_host = kwargs['data_host'] env.use_ssh_config = True env.host_string = _remote_host case_file_str = cat_case_file(_remote_dir, case_name) exec case_file_str return parameters def get_status_dict(case_name, **kwargs): global remote_data, data_dir, data_host, remote_server_auto, 
paraview_cmd _remote_data = remote_data if 'remote_data' in kwargs: _remote_data = kwargs['remote_data'] if _remote_data: _remote_dir = data_dir if 'data_dir' in kwargs: _remote_dir = kwargs['data_dir'] _remote_host = data_host if 'data_host' in kwargs: _remote_host = kwargs['data_host'] env.use_ssh_config = True env.host_string = _remote_host status_file_str = cat_status_file(_remote_dir, case_name) if status_file_str is not None: # print status_file_str return json.loads(status_file_str) else: print 'WARNING: '+case_name+'_status.txt file not found' return None else: # Get contents of local file with open(case_name+'_status.txt') as f: status_file_str = f.read() if status_file_str is not None: # print status_file_str return json.loads(status_file_str) else: print 'WARNING: '+case_name+'_status.txt file not found' return None def get_num_procs(case_name, **kwargs): # remote_host,remote_dir,case_name): status = get_status_dict(case_name, **kwargs) if 'num processor' in status: return status['num processor'] else: return None def get_case_root(case_name, num_procs): return case_name+'_P'+num_procs+'_OUTPUT/'+case_name def get_case_report(case): return case+'_report.csv' def print_html_parameters(parameters): reference = parameters['reference'] # material = parameters['material'] conditions = parameters[reference] mach = 0.0 speed = 0.0 if 'Mach' in conditions['V']: mach = conditions['V']['Mach'] speed = 0.0 else: speed = mag(conditions['V']['vector']) mach = 0.0 if 'Reynolds No' in conditions: reynolds = conditions['Reynolds No'] else: reynolds = 'undefined' if 'Reference Length' in conditions: reflength = conditions['Reference Length'] else: reflength = 'undefined' import string html_template = '''<table> <tr><td>pressure</td><td>$pressure</td></tr> <tr><td>temperature</td><td>$temperature</td></tr> <tr><td>Reynolds No</td><td>$reynolds</td></tr> <tr><td>Ref length</td><td>$reflength</td></tr> <tr><td>Speed</td><td>$speed</td></tr> <tr><td>Mach 
No</td><td>$mach</td></tr> </table>''' html_output=string.Template(html_template) return html_output.substitute({'pressure':conditions['pressure'], 'temperature':conditions['temperature'], 'reynolds':reynolds, 'reflength':reflength, 'speed':speed, 'mach':mach, }) import uuid import time from IPython.display import HTML, Javascript, display class ProgressBar(object): def __init__(self): self.divid = str(uuid.uuid4()) self.val = 0 pb = HTML( """ <div style="border: 1px solid black; width:500px"> <div id="%s" style="background-color:grey; width:0%%">&nbsp;</div> </div> """ % self.divid) display(pb) def __iadd__(self, v): self.update(self.val+v) return self def complete(self): self.update(100) display(Javascript("$('div#%s').hide()" % (self.divid))) def update(self, i): self.val = i display(Javascript("$('div#%s').width('%i%%')" % (self.divid, i))) remote_data = True data_dir = 'data' data_host = 'user@server' remote_server_auto = True paraview_cmd = 'mpiexec pvserver' paraview_home = '/usr/local/bin/' job_queue = 'default' job_tasks = 1 job_ntaskpernode = 1 job_project = 'default' def data_location_form_html(**kwargs): global remote_data, data_dir, data_host, remote_server_auto, paraview_cmd global job_queue, job_tasks, job_ntaskpernode, job_project if 'data_dir' in kwargs: data_dir = kwargs['data_dir'] if 'paraview_cmd' in kwargs: paraview_cmd = kwargs['paraview_cmd'] if 'data_host' in kwargs: data_host = kwargs['data_host'] remote_data_checked = '' if remote_data: remote_data_checked = 'checked="checked"' remote_server_auto_checked = '' if remote_server_auto: remote_server_auto_checked = 'checked="checked"' remote_cluster_checked = '' job_queue = 'default' job_tasks = 1 job_ntaskpernode = 1 job_project = 'default' input_form = """ <div style="background-color:gainsboro; border:solid black; width:640px; padding:20px;"> <label style="width:22%;display:inline-block">Remote Data</label> <input type="checkbox" id="remote_data" value="remote" {remote_data_checked}><br> 
<label style="width:22%;display:inline-block">Data Directory</label> <input style="width:75%;" type="text" id="data_dir" value="{data_dir}"><br> <label style="width:22%;display:inline-block">Data Host</label> <input style="width:75%;" type="text" id="data_host" value="{data_host}"><br> <label style="width:22%;display:inline-block">Remote Server Auto</label> <input type="checkbox" id="remote_server_auto" value="remote_auto" {remote_server_auto_checked}><br> <label style="width:22%;display:inline-block">Paraview Cmd </label> <input style="width:75%;" type="text" id="paraview_cmd" value="{paraview_cmd}"><br> <label style="width:22%;display:inline-block">Remote Cluster</label> <input type="checkbox" id="remote_cluster" value="remote_cluster" {remote_cluster_checked}><br> <label style="width:22%;display:inline-block">Job Queue </label> <input style="width:75%;" type="text" id="job_queue" value="{job_queue}"><br> <label style="width:22%;display:inline-block">Job Tasks </label> <input style="width:75%;" type="text" id="job_tasks" value="{job_tasks}"><br> <label style="width:22%;display:inline-block">Job Tasks per Node </label> <input style="width:75%;" type="text" id="job_ntaskpernode" value="{job_ntaskpernode}"><br> <label style="width:22%;display:inline-block">Job Project </label> <input style="width:75%;" type="text" id="job_project" value="{job_project}"><br> <button onclick="apply()">Apply</button> </div> """ javascript = """ <script type="text/Javascript"> function apply(){ var remote_data = ($('input#remote_data').is(':checked') ? 'True' : 'False'); var data_dir = $('input#data_dir').val(); var data_host = $('input#data_host').val(); var remote_server_auto = ($('input#remote_server_auto').is(':checked') ? 'True' : 'False'); var paraview_cmd = $('input#paraview_cmd').val(); var remote_cluster = ($('input#remote_cluster').is(':checked') ? 
'True' : 'False'); var kernel = IPython.notebook.kernel; // Send data dir to ipython var command = "from zutil import post; post.data_dir = '" + data_dir + "'"; console.log("Executing Command: " + command); kernel.execute(command); // Send data host to ipython var command = "from zutil import post; post.data_host = '" + data_host + "'"; console.log("Executing Command: " + command); kernel.execute(command); // Send remote server flag to ipython var command = "from zutil import post; post.remote_server_auto = " + remote_server_auto; console.log("Executing Command: " + command); kernel.execute(command); // Send paraview command to ipython var command = "from zutil import post; post.paraview_cmd = '" + paraview_cmd + "'"; console.log("Executing Command: " + command); kernel.execute(command); // Send remote data flag to ipython var command = "from zutil import post; post.remote_data = " + remote_data ; console.log("Executing Command: " + command); kernel.execute(command); // Set paraview command to none if not using remote server var command = "from zutil import post; if not post.remote_server_auto: post.paraview_cmd=None" console.log("Executing Command: " + command); kernel.execute(command); // Set data to local host for local data var command = "from zutil import post; if not post.post.remote_data: post.data_host='localhost'; post.paraview_cmd=None" console.log("Executing Command: " + command); kernel.execute(command); if(remote_cluster == 'True'){ // Set cluster job info //var command = "from zutil import post; post.jo"; } } </script> """ return HTML(input_form.format(data_dir=data_dir, data_host=data_host, paraview_cmd=paraview_cmd, remote_data_checked=remote_data_checked, remote_server_auto_checked=remote_server_auto_checked, remote_cluster_checked=remote_cluster_checked, job_queue=job_queue, job_tasks=job_tasks, job_ntaskpernode=job_ntaskpernode, job_project=job_project) + javascript)
zenotech/zPost
python/zutil/post.py
Python
bsd-3-clause
41,409
[ "ParaView", "VTK" ]
e0436282299ee2b988ed97d690fa581513625e28f9dce1bf415fc62fda87d5b5
import numpy as nm from sfepy.base.base import output, assert_ from sfepy.base.ioutils import ensure_path from sfepy.linalg import cycle from sfepy.discrete.fem.mesh import Mesh from sfepy.mesh.mesh_tools import elems_q2t def get_tensor_product_conn(shape): """ Generate vertex connectivity for cells of a tensor-product mesh of the given shape. Parameters ---------- shape : array of 2 or 3 ints Shape (counts of nodes in x, y, z) of the mesh. Returns ------- conn : array The vertex connectivity array. desc : str The cell kind. """ shape = nm.asarray(shape) dim = len(shape) assert_(1 <= dim <= 3) n_nod = nm.prod(shape) n_el = nm.prod(shape - 1) grid = nm.arange(n_nod, dtype=nm.int32) grid.shape = shape if dim == 1: conn = nm.zeros((n_el, 2), dtype=nm.int32) conn[:, 0] = grid[:-1] conn[:, 1] = grid[1:] desc = '1_2' elif dim == 2: conn = nm.zeros((n_el, 4), dtype=nm.int32) conn[:, 0] = grid[:-1, :-1].flat conn[:, 1] = grid[1:, :-1].flat conn[:, 2] = grid[1:, 1:].flat conn[:, 3] = grid[:-1, 1:].flat desc = '2_4' else: conn = nm.zeros((n_el, 8), dtype=nm.int32) conn[:, 0] = grid[:-1, :-1, :-1].flat conn[:, 1] = grid[1:, :-1, :-1].flat conn[:, 2] = grid[1:, 1:, :-1].flat conn[:, 3] = grid[:-1, 1:, :-1].flat conn[:, 4] = grid[:-1, :-1, 1:].flat conn[:, 5] = grid[1:, :-1, 1:].flat conn[:, 6] = grid[1:, 1:, 1:].flat conn[:, 7] = grid[:-1, 1:, 1:].flat desc = '3_8' return conn, desc def gen_block_mesh(dims, shape, centre, mat_id=0, name='block', coors=None, verbose=True): """ Generate a 2D or 3D block mesh. The dimension is determined by the lenght of the shape argument. Parameters ---------- dims : array of 2 or 3 floats Dimensions of the block. shape : array of 2 or 3 ints Shape (counts of nodes in x, y, z) of the block mesh. centre : array of 2 or 3 floats Centre of the block. mat_id : int, optional The material id of all elements. name : string Mesh name. verbose : bool If True, show progress of the mesh generation. 
Returns ------- mesh : Mesh instance """ dims = nm.asarray(dims, dtype=nm.float64) shape = nm.asarray(shape, dtype=nm.int32) centre = nm.asarray(centre, dtype=nm.float64) dim = shape.shape[0] centre = centre[:dim] dims = dims[:dim] n_nod = nm.prod(shape) output('generating %d vertices...' % n_nod, verbose=verbose) x0 = centre - 0.5 * dims dd = dims / (shape - 1) ngrid = nm.mgrid[[slice(ii) for ii in shape]] ngrid.shape = (dim, n_nod) coors = x0 + ngrid.T * dd output('...done', verbose=verbose) n_el = nm.prod(shape - 1) output('generating %d cells...' % n_el, verbose=verbose) mat_ids = nm.empty((n_el,), dtype=nm.int32) mat_ids.fill(mat_id) conn, desc = get_tensor_product_conn(shape) output('...done', verbose=verbose) mesh = Mesh.from_data(name, coors, None, [conn], [mat_ids], [desc]) return mesh def gen_cylinder_mesh(dims, shape, centre, axis='x', force_hollow=False, is_open=False, open_angle=0.0, non_uniform=False, name='cylinder', verbose=True): """ Generate a cylindrical mesh along an axis. Its cross-section can be ellipsoidal. Parameters ---------- dims : array of 5 floats Dimensions of the cylinder: inner surface semi-axes a1, b1, outer surface semi-axes a2, b2, length. shape : array of 3 ints Shape (counts of nodes in radial, circumferential and longitudinal directions) of the cylinder mesh. centre : array of 3 floats Centre of the cylinder. axis: one of 'x', 'y', 'z' The axis of the cylinder. force_hollow : boolean Force hollow mesh even if inner radii a1 = b1 = 0. is_open : boolean Generate an open cylinder segment. open_angle : float Opening angle in radians. non_uniform : boolean If True, space the mesh nodes in radial direction so that the element volumes are (approximately) the same, making thus the elements towards the outer surface thinner. name : string Mesh name. verbose : bool If True, show progress of the mesh generation. 
Returns ------- mesh : Mesh instance """ dims = nm.asarray(dims, dtype=nm.float64) shape = nm.asarray(shape, dtype=nm.int32) centre = nm.asarray(centre, dtype=nm.float64) a1, b1, a2, b2, length = dims nr, nfi, nl = shape origin = centre - nm.array([0.5 * length, 0.0, 0.0]) dfi = 2.0 * (nm.pi - open_angle) / nfi if is_open: nnfi = nfi + 1 else: nnfi = nfi is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15) if is_hollow: mr = 0 else: mr = (nnfi - 1) * nl grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32) n_nod = nr * nnfi * nl - mr coors = nm.zeros((n_nod, 3), dtype=nm.float64) angles = nm.linspace(open_angle, open_angle+(nfi)*dfi, nfi+1) xs = nm.linspace(0.0, length, nl) if non_uniform: ras = nm.zeros((nr,), dtype=nm.float64) rbs = nm.zeros_like(ras) advol = (a2**2 - a1**2) / (nr - 1) bdvol = (b2**2 - b1**2) / (nr - 1) ras[0], rbs[0] = a1, b1 for ii in range(1, nr): ras[ii] = nm.sqrt(advol + ras[ii-1]**2) rbs[ii] = nm.sqrt(bdvol + rbs[ii-1]**2) else: ras = nm.linspace(a1, a2, nr) rbs = nm.linspace(b1, b2, nr) # This is 3D only... output('generating %d vertices...' % n_nod, verbose=verbose) ii = 0 for ix in range(nr): a, b = ras[ix], rbs[ix] for iy, fi in enumerate(angles[:nnfi]): for iz, x in enumerate(xs): grid[ix,iy,iz] = ii coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)] ii += 1 if not is_hollow and (ix == 0): if iy > 0: grid[ix,iy,iz] = grid[ix,0,iz] ii -= 1 assert_(ii == n_nod) output('...done', verbose=verbose) n_el = (nr - 1) * nnfi * (nl - 1) conn = nm.zeros((n_el, 8), dtype=nm.int32) output('generating %d cells...' 
% n_el, verbose=verbose) ii = 0 for (ix, iy, iz) in cycle([nr-1, nnfi, nl-1]): if iy < (nnfi - 1): conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ], grid[ix+1,iy+1,iz ], grid[ix ,iy+1,iz ], grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1], grid[ix+1,iy+1,iz+1], grid[ix ,iy+1,iz+1]] ii += 1 elif not is_open: conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ], grid[ix+1,0,iz ], grid[ix ,0,iz ], grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1], grid[ix+1,0,iz+1], grid[ix ,0,iz+1]] ii += 1 mat_id = nm.zeros((n_el,), dtype = nm.int32) desc = '3_8' assert_(n_nod == (conn.max() + 1)) output('...done', verbose=verbose) if axis == 'z': coors = coors[:,[1,2,0]] elif axis == 'y': coors = coors[:,[2,0,1]] mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc]) return mesh def _spread_along_axis(axis, coors, tangents, grading_fun): """ Spread the coordinates along the given axis using the grading function, and the tangents in the other two directions. """ oo = list(set([0, 1, 2]).difference([axis])) c0, c1, c2 = coors[:, axis], coors[:, oo[0]], coors[:, oo[1]] out = nm.empty_like(coors) mi, ma = c0.min(), c0.max() nc0 = (c0 - mi) / (ma - mi) out[:, axis] = oc0 = grading_fun(nc0) * (ma - mi) + mi nc = oc0 - oc0.min() mi, ma = c1.min(), c1.max() n1 = 2 * (c1 - mi) / (ma - mi) - 1 out[:, oo[0]] = c1 + n1 * nc * tangents[oo[0]] mi, ma = c2.min(), c2.max() n2 = 2 * (c2 - mi) / (ma - mi) - 1 out[:, oo[1]] = c2 + n2 * nc * tangents[oo[1]] return out def _get_extension_side(side, grading_fun, mat_id, b_dims, b_shape, e_dims, e_shape, centre): """ Get a mesh extending the given side of a block mesh. """ # Pure extension dimensions. pe_dims = 0.5 * (e_dims - b_dims) coff = 0.5 * (b_dims + pe_dims) cc = centre + coff * nm.eye(3)[side] if side == 0: # x axis. dims = [pe_dims[0], b_dims[1], b_dims[2]] shape = [e_shape, b_shape[1], b_shape[2]] tangents = [0, pe_dims[1] / pe_dims[0], pe_dims[2] / pe_dims[0]] elif side == 1: # y axis. 
dims = [b_dims[0], pe_dims[1], b_dims[2]] shape = [b_shape[0], e_shape, b_shape[2]] tangents = [pe_dims[0] / pe_dims[1], 0, pe_dims[2] / pe_dims[1]] elif side == 2: # z axis. dims = [b_dims[0], b_dims[1], pe_dims[2]] shape = [b_shape[0], b_shape[1], e_shape] tangents = [pe_dims[0] / pe_dims[2], pe_dims[1] / pe_dims[2], 0] e_mesh = gen_block_mesh(dims, shape, cc, mat_id=mat_id, verbose=False) e_mesh.coors[:] = _spread_along_axis(side, e_mesh.coors, tangents, grading_fun) return e_mesh, shape def gen_extended_block_mesh(b_dims, b_shape, e_dims, e_shape, centre, grading_fun=None, name=None): """ Generate a 3D mesh with a central block and (coarse) extending side meshes. The resulting mesh is again a block. Each of the components has a different material id. Parameters ---------- b_dims : array of 3 floats The dimensions of the central block. b_shape : array of 3 ints The shape (counts of nodes in x, y, z) of the central block mesh. e_dims : array of 3 floats The dimensions of the complete block (central block + extensions). e_shape : int The count of nodes of extending blocks in the direction from the central block. centre : array of 3 floats The centre of the mesh. grading_fun : callable, optional A function of :math:`x \in [0, 1]` that can be used to shift nodes in the extension axis directions to allow smooth grading of element sizes from the centre. The default function is :math:`x**p` with :math:`p` determined so that the element sizes next to the central block have the size of the shortest edge of the central block. name : string, optional The mesh name. Returns ------- mesh : Mesh instance """ b_dims = nm.asarray(b_dims, dtype=nm.float64) b_shape = nm.asarray(b_shape, dtype=nm.int32) e_dims = nm.asarray(e_dims, dtype=nm.float64) centre = nm.asarray(centre, dtype=nm.float64) # Pure extension dimensions. pe_dims = 0.5 * (e_dims - b_dims) # Central block element sizes. dd = (b_dims / (b_shape - 1)) # The "first x" going to grading_fun. 
nc = 1.0 / (e_shape - 1) # Grading power and function. power = nm.log(dd.min() / pe_dims.min()) / nm.log(nc) grading_fun = (lambda x: x**power) if grading_fun is None else grading_fun # Central block mesh. b_mesh = gen_block_mesh(b_dims, b_shape, centre, mat_id=0, verbose=False) # 'x' extension. e_mesh, xs = _get_extension_side(0, grading_fun, 10, b_dims, b_shape, e_dims, e_shape, centre) mesh = b_mesh + e_mesh # Mirror by 'x'. e_mesh.coors[:, 0] = (2 * centre[0]) - e_mesh.coors[:, 0] e_mesh.cmesh.cell_groups.fill(11) mesh = mesh + e_mesh # 'y' extension. e_mesh, ys = _get_extension_side(1, grading_fun, 20, b_dims, b_shape, e_dims, e_shape, centre) mesh = mesh + e_mesh # Mirror by 'y'. e_mesh.coors[:, 1] = (2 * centre[1]) - e_mesh.coors[:, 1] e_mesh.cmesh.cell_groups.fill(21) mesh = mesh + e_mesh # 'z' extension. e_mesh, zs = _get_extension_side(2, grading_fun, 30, b_dims, b_shape, e_dims, e_shape, centre) mesh = mesh + e_mesh # Mirror by 'z'. e_mesh.coors[:, 2] = (2 * centre[2]) - e_mesh.coors[:, 2] e_mesh.cmesh.cell_groups.fill(31) mesh = mesh + e_mesh if name is not None: mesh.name = name # Verify merging by checking the number of nodes. n_nod = (nm.prod(nm.maximum(b_shape - 2, 0)) + 2 * nm.prod(xs) + 2 * (max(ys[0] - 2, 0) * ys[1] * ys[2]) + 2 * (max(zs[0] - 2, 0) * max(zs[1] - 2, 0) * zs[2])) if n_nod != mesh.n_nod: raise ValueError('Merge of meshes failed! 
(%d == %d)' % (n_nod, mesh.n_nod)) return mesh def tiled_mesh1d(conn, coors, ngrps, idim, n_rep, bb, eps=1e-6, ndmap=False): from sfepy.discrete.fem.periodic import match_grid_plane s1 = nm.nonzero(coors[:,idim] < (bb[0] + eps))[0] s2 = nm.nonzero(coors[:,idim] > (bb[1] - eps))[0] if s1.shape != s2.shape: raise ValueError, 'incompatible shapes: %s == %s'\ % (s1.shape, s2.shape) (nnod0, dim) = coors.shape nnod = nnod0 * n_rep - s1.shape[0] * (n_rep - 1) (nel0, nnel) = conn.shape nel = nel0 * n_rep dd = nm.zeros((dim,), dtype=nm.float64) dd[idim] = bb[1] - bb[0] m1, m2 = match_grid_plane(coors[s1], coors[s2], idim) oconn = nm.zeros((nel, nnel), dtype=nm.int32) ocoors = nm.zeros((nnod, dim), dtype=nm.float64) ongrps = nm.zeros((nnod,), dtype=nm.int32) if type(ndmap) is bool: ret_ndmap = ndmap else: ret_ndmap= True ndmap_out = nm.zeros((nnod,), dtype=nm.int32) el_off = 0 nd_off = 0 for ii in range(n_rep): if ii == 0: oconn[0:nel0,:] = conn ocoors[0:nnod0,:] = coors ongrps[0:nnod0] = ngrps.squeeze() nd_off += nnod0 mapto = s2[m2] mask = nm.ones((nnod0,), dtype=nm.int32) mask[s1] = 0 remap0 = nm.cumsum(mask) - 1 nnod0r = nnod0 - s1.shape[0] cidx = nm.where(mask) if ret_ndmap: ndmap_out[0:nnod0] = nm.arange(nnod0) else: remap = remap0 + nd_off remap[s1[m1]] = mapto mapto = remap[s2[m2]] ocoors[nd_off:(nd_off + nnod0r),:] =\ (coors[cidx,:] + ii * dd) ongrps[nd_off:(nd_off + nnod0r)] = ngrps[cidx].squeeze() oconn[el_off:(el_off + nel0),:] = remap[conn] if ret_ndmap: ndmap_out[nd_off:(nd_off + nnod0r)] = cidx[0] nd_off += nnod0r el_off += nel0 if ret_ndmap: if ndmap is not None: max_nd_ref = nm.max(ndmap) idxs = nm.where(ndmap_out > max_nd_ref) ndmap_out[idxs] = ndmap[ndmap_out[idxs]] return oconn, ocoors, ongrps, ndmap_out else: return oconn, ocoors, ongrps def gen_tiled_mesh(mesh, grid=None, scale=1.0, eps=1e-6, ret_ndmap=False): """ Generate a new mesh by repeating a given periodic element along each axis. 
Parameters ---------- mesh : Mesh instance The input periodic FE mesh. grid : array Number of repetition along each axis. scale : float, optional Scaling factor. eps : float, optional Tolerance for boundary detection. ret_ndmap : bool, optional If True, return global node map. Returns ------- mesh_out : Mesh instance FE mesh. ndmap : array Maps: actual node id --> node id in the reference cell. """ bbox = mesh.get_bounding_box() if grid is None: iscale = max(int(1.0 / scale), 1) grid = [iscale] * mesh.dim conn = mesh.get_conn(mesh.descs[0]) mat_ids = mesh.cmesh.cell_groups coors = mesh.coors ngrps = mesh.cmesh.vertex_groups nrep = nm.prod(grid) ndmap = None output('repeating %s ...' % grid) nblk = 1 for ii, gr in enumerate(grid): if ret_ndmap: (conn, coors, ngrps, ndmap0) = tiled_mesh1d(conn, coors, ngrps, ii, gr, bbox.transpose()[ii], eps=eps, ndmap=ndmap) ndmap = ndmap0 else: conn, coors, ngrps = tiled_mesh1d(conn, coors, ngrps, ii, gr, bbox.transpose()[ii], eps=eps) nblk *= gr output('...done') mat_ids = nm.tile(mat_ids, (nrep,)) mesh_out = Mesh.from_data('tiled mesh', coors * scale, ngrps, [conn], [mat_ids], [mesh.descs[0]]) if ret_ndmap: return mesh_out, ndmap else: return mesh_out def gen_misc_mesh(mesh_dir, force_create, kind, args, suffix='.mesh', verbose=False): """ Create sphere or cube mesh according to `kind` in the given directory if it does not exist and return path to it. 
""" import os from sfepy import data_dir defdir = os.path.join(data_dir, 'meshes') if mesh_dir is None: mesh_dir = defdir def retype(args, types, defaults): args=list(args) args.extend(defaults[len(args):len(defaults)]) return tuple([type(value) for type, value in zip(types, args) ]) if kind == 'sphere': default = [5, 41, args[0]] args = retype(args, [float, int, float], default) mesh_pattern = os.path.join(mesh_dir, 'sphere-%.2f-%.2f-%i') else: assert_(kind == 'cube') args = retype(args, (int, float, int, float, int, float), (args[0], args[1], args[0], args[1], args[0], args[1])) mesh_pattern = os.path.join(mesh_dir, 'cube-%i_%.2f-%i_%.2f-%i_%.2f') if verbose: output(args) filename = mesh_pattern % args if not force_create: if os.path.exists(filename): return filename if os.path.exists(filename + '.mesh') : return filename + '.mesh' if os.path.exists(filename + '.vtk'): return filename + '.vtk' if kind == 'cube': filename = filename + suffix ensure_path(filename) output('creating new cube mesh') output('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)' % args) output('to file %s...' % filename) mesh = gen_block_mesh(args[1::2], args[0::2], (0.0, 0.0, 0.0), name=filename) mesh.write(filename, io='auto') output('...done') else: import subprocess, shutil, tempfile filename = filename + '.mesh' ensure_path(filename) output('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d' % args) output('to file %s...' 
% filename) f = open(os.path.join(defdir, 'quantum', 'sphere.geo')) tmp_dir = tempfile.mkdtemp() tmpfile = os.path.join(tmp_dir, 'sphere.geo.temp') ff = open(tmpfile, "w") ff.write(""" R = %i.0; n = %i.0; dens = %f; """ % args) ff.write(f.read()) f.close() ff.close() subprocess.call(['gmsh', '-3', tmpfile, '-format', 'mesh', '-o', filename]) shutil.rmtree(tmp_dir) output('...done') return filename def gen_mesh_from_string(mesh_name, mesh_dir): import re result = re.match('^\\s*([a-zA-Z]+)[:\\(]([^\\):]*)[:\\)](\\*)?\\s*$', mesh_name) if result is None: return mesh_name else: args = re.split(',', result.group(2)) kind = result.group(1) return gen_misc_mesh(mesh_dir, result.group(3)=='*', kind, args) def gen_mesh_from_goem(geo, a=None, quadratic=False, verbose=True, refine=False, polyfilename='./meshgen.poly', out='mesh', **kwargs): """ Runs mesh generator - tetgen for 3D or triangle for 2D meshes. Parameters ---------- geo : geometry geometry description a : int, optional a maximum area/volume constraint quadratic : bool, optional set True for quadratic elements verbose : bool, optional detailed information refine : bool, optional refines mesh Returns ------- mesh : Mesh instance triangular or tetrahedral mesh """ import os.path as op import pexpect # write geometry to poly file geo.to_poly_file(polyfilename) if not refine: params = "-Apq" else: params = "-Arq" if verbose: params = params + " -Q" if a != None and not refine: params = params + " -a%f" % (a) if refine: params = params + " -a" if quadratic: params = params + " -o2" params = params + " %s" % (polyfilename) meshgen_call = {2: 'triangle', 3: 'tetgen'} cmd = "%s %s" % (meshgen_call[geo.dim], params) if verbose: print "Generating mesh using", cmd if geo.dim == 2: p=pexpect.run(cmd, timeout=None) bname, ext = op.splitext(polyfilename) mesh = Mesh.from_file(bname + '.1.node') mesh.write(bname + '.' + out) if geo.dim == 3: p=pexpect.spawn(cmd, timeout=None) if not refine: p.expect("Opening %s." 
% (polyfilename)) else: p.expect("Opening %s.node.\r\n" % (polyfilename)) p.expect("Opening %s.ele.\r\n" % (polyfilename)) p.expect("Opening %s.face.\r\n" % (polyfilename)) p.expect("Opening %s.vol." % (polyfilename)) assert p.before == "" p.expect(pexpect.EOF) if p.before != "\r\n": print p.before raise "Error when running mesh generator (see above for output): %s" % cmd # http://www.cs.cmu.edu/~quake/triangle.html # # triangle [-prq__a__uAcDjevngBPNEIOXzo_YS__iFlsCQVh] input_file # -p Triangulates a Planar Straight Line Graph (.poly file). # -r Refines a previously generated mesh. # -q Quality mesh generation. A minimum angle may be specified. # -a Applies a maximum triangle area constraint. # -u Applies a user-defined triangle constraint. # -A Applies attributes to identify triangles in certain regions. # -c Encloses the convex hull with segments. # -D Conforming Delaunay: all triangles are truly Delaunay. # -j Jettison unused vertices from output .node file. # -e Generates an edge list. # -v Generates a Voronoi diagram. # -n Generates a list of triangle neighbors. # -g Generates an .off file for Geomview. # -B Suppresses output of boundary information. # -P Suppresses output of .poly file. # -N Suppresses output of .node file. # -E Suppresses output of .ele file. # -I Suppresses mesh iteration numbers. # -O Ignores holes in .poly file. # -X Suppresses use of exact arithmetic. # -z Numbers all items starting from zero (rather than one). # -o2 Generates second-order subparametric elements. # -Y Suppresses boundary segment splitting. # -S Specifies maximum number of added Steiner points. # -i Uses incremental method, rather than divide-and-conquer. # -F Uses Fortune's sweepline algorithm, rather than d-and-c. # -l Uses vertical cuts only, rather than alternating cuts. # -s Force segments into mesh by splitting (instead of using CDT). # -C Check consistency of final mesh. # -Q Quiet: No terminal output except errors. 
# -V Verbose: Detailed information on what I'm doing. # -h Help: Detailed instructions for Triangle. # http://tetgen.berlios.de/ # # tetgen [-prq_a_AiMYS_T_dzo_fenvgGOJBNEFICQVh] input_file # -p Tetrahedralizes a piecewise linear complex (PLC). # -r Reconstructs a previously generated mesh. # -q Refines mesh (to improve mesh quality). # -a Applies a maximum tetrahedron volume constraint. # -A Assigns attributes to tetrahedra in different regions. # -i Inserts a list of additional points into mesh. # -M No merge of coplanar facets. # -Y No splitting of input boundaries (facets and segments). # -S Specifies maximum number of added points. # -T Sets a tolerance for coplanar test (default 1e-8). # -d Detects self-intersections of facets of the PLC. # -z Numbers all output items starting from zero. # -o2 Generates second-order subparametric elements. # -f Outputs all faces to .face file. # -e Outputs all edges to .edge file. # -n Outputs tetrahedra neighbors to .neigh file. # -v Outputs Voronoi diagram to files. # -g Outputs mesh to .mesh file for viewing by Medit. # -G Outputs mesh to .msh file for viewing by Gid. # -O Outputs mesh to .off file for viewing by Geomview. # -K Outputs mesh to .vtk file for viewing by Paraview. # -J No jettison of unused vertices from output .node file. # -B Suppresses output of boundary information. # -N Suppresses output of .node file. # -E Suppresses output of .ele file. # -F Suppresses output of .face file. # -I Suppresses mesh iteration numbers. # -C Checks the consistency of the final mesh. # -Q Quiet: No terminal output except errors. # -V Verbose: Detailed information, more terminal output. # -h Help: A brief instruction for using TetGen. def gen_mesh_from_voxels(voxels, dims, etype='q'): """ Generate FE mesh from voxels (volumetric data). Parameters ---------- voxels : array Voxel matrix, 1=material. dims : array Size of one voxel. 
etype : integer, optional 'q' - quadrilateral or hexahedral elements 't' - triangular or tetrahedral elements Returns ------- mesh : Mesh instance Finite element mesh. """ dims = dims.squeeze() dim = len(dims) nddims = nm.array(voxels.shape) + 2 nodemtx = nm.zeros(nddims, dtype=nm.int32) if dim == 2: #iy, ix = nm.where(voxels.transpose()) iy, ix = nm.where(voxels) nel = ix.shape[0] if etype == 'q': nodemtx[ix,iy] += 1 nodemtx[ix + 1,iy] += 1 nodemtx[ix + 1,iy + 1] += 1 nodemtx[ix,iy + 1] += 1 elif etype == 't': nodemtx[ix,iy] += 2 nodemtx[ix + 1,iy] += 1 nodemtx[ix + 1,iy + 1] += 2 nodemtx[ix,iy + 1] += 1 nel *= 2 elif dim == 3: #iy, ix, iz = nm.where(voxels.transpose(1, 0, 2)) iy, ix, iz = nm.where(voxels) nel = ix.shape[0] if etype == 'q': nodemtx[ix,iy,iz] += 1 nodemtx[ix + 1,iy,iz] += 1 nodemtx[ix + 1,iy + 1,iz] += 1 nodemtx[ix,iy + 1,iz] += 1 nodemtx[ix,iy,iz + 1] += 1 nodemtx[ix + 1,iy,iz + 1] += 1 nodemtx[ix + 1,iy + 1,iz + 1] += 1 nodemtx[ix,iy + 1,iz + 1] += 1 elif etype == 't': nodemtx[ix,iy,iz] += 6 nodemtx[ix + 1,iy,iz] += 2 nodemtx[ix + 1,iy + 1,iz] += 2 nodemtx[ix,iy + 1,iz] += 2 nodemtx[ix,iy,iz + 1] += 2 nodemtx[ix + 1,iy,iz + 1] += 2 nodemtx[ix + 1,iy + 1,iz + 1] += 6 nodemtx[ix,iy + 1,iz + 1] += 2 nel *= 6 else: msg = 'incorrect voxel dimension! 
(%d)' % dim raise ValueError(msg) ndidx = nm.where(nodemtx) coors = nm.array(ndidx).transpose() * dims nnod = coors.shape[0] nodeid = -nm.ones(nddims, dtype=nm.int32) nodeid[ndidx] = nm.arange(nnod) # generate elements if dim == 2: elems = nm.array([nodeid[ix,iy], nodeid[ix + 1,iy], nodeid[ix + 1,iy + 1], nodeid[ix,iy + 1]]).transpose() elif dim == 3: elems = nm.array([nodeid[ix,iy,iz], nodeid[ix + 1,iy,iz], nodeid[ix + 1,iy + 1,iz], nodeid[ix,iy + 1,iz], nodeid[ix,iy,iz + 1], nodeid[ix + 1,iy,iz + 1], nodeid[ix + 1,iy + 1,iz + 1], nodeid[ix,iy + 1,iz + 1]]).transpose() if etype == 't': elems = elems_q2t(elems) eid = etype + str(dim) eltab = {'q2': 4, 'q3': 8, 't2': 3, 't3': 4} mesh = Mesh.from_data('voxel_data', coors, nm.ones((nnod,), dtype=nm.int32), {0: nm.ascontiguousarray(elems)}, {0: nm.ones((nel,), dtype=nm.int32)}, {0: '%d_%d' % (dim, eltab[eid])}) return mesh def gen_mesh_from_poly(filename, verbose=True): """ Import mesh generated by tetgen or triangle. Parameters ---------- filename : string file name Returns ------- mesh : Mesh instance triangular or tetrahedral mesh """ def getnodes(fnods,up): f=file(fnods) l=[int(x) for x in f.readline().split()] npoints,dim,nattrib,nbound=l if verbose: up.init(npoints) nodes=[] for line in f: if line[0]=="#": continue l=[float(x) for x in line.split()] l = l[:(dim + 1)] l[0]=int(l[0]) nodes.append(tuple(l)) assert l[0]==len(nodes) assert npoints==len(nodes) return nodes def getele(fele,up): f=file(fele) l=[int(x) for x in f.readline().split()] nele,nnod,nattrib=l #we have either linear or quadratic tetrahedra: if nnod in [4,10]: elem = 'tetra' linear = (nnod == 4) if nnod in [3, 7]: elem = 'tri' linear = (nnod == 3) # if nattrib!=1: # raise "tetgen didn't assign an entity number to each element (option -A)" els=[] regions={} for line in f: if line[0]=="#": continue l=[int(x) for x in line.split()] if elem == 'tri': if linear: assert (len(l) - 1 - nattrib) == 3 els.append((l[0],l[1],l[2],l[3])) regionnum=l[5] else: 
assert len(l)-2 == 10 els.append((l[0],54,l[1],l[2],l[3],l[4], l[5],l[6],l[7],l[8],l[9],l[10])) regionnum=l[11] if elem == 'tetra': if linear: assert len(l)-2 == 4 els.append((l[0],54,l[1],l[2],l[3],l[4])) regionnum=l[5] else: assert len(l)-2 == 10 els.append((l[0],54,l[1],l[2],l[3],l[4], l[5],l[6],l[7],l[8],l[9],l[10])) regionnum=l[11] if regionnum==0: print "see %s, element # %d"%(fele,l[0]) raise "there are elements not belonging to any physical entity" if regions.has_key(regionnum): regions[regionnum].append(l[0]) else: regions[regionnum]=[l[0]] assert l[0]==len(els) if verbose: up.update(l[0]) return els,regions,linear def getBCfaces(ffaces,up): f=file(ffaces) l=[int(x) for x in f.readline().split()] nfaces,nattrib=l if nattrib!=1: raise "tetgen didn't assign an entity number to each face \ (option -A)" if verbose: up.init(nfaces) faces={} for line in f: if line[0]=="#": continue l=[int(x) for x in line.split()] assert len(l)==5 regionnum=l[4] if regionnum==0: continue if faces.has_key(regionnum): faces[regionnum].append((l[1],l[2],l[3])) else: faces[regionnum]=[(l[1],l[2],l[3])] if verbose: up.update(l[0]) return faces def calculatexyz(nodes, els): """Calculate the missing xyz values in place""" def avg(i,j,n4,nodes): a=nodes[n4[i-1]-1] b=nodes[n4[j-1]-1] return (a[1]+b[1])/2, (a[2]+b[2])/2, (a[3]+b[3])/2 def getxyz(i,n4,nodes): if i+5==5: return avg(1,2,n4,nodes) if i+5==6: return avg(2,3,n4,nodes) if i+5==7: return avg(1,3,n4,nodes) if i+5==8: return avg(1,4,n4,nodes) if i+5==9: return avg(2,4,n4,nodes) if i+5==10: return avg(3,4,n4,nodes) raise "wrong topology" for e in els: n4=e[2:2+4] n6=e[2+4:2+4+10] for i,n in enumerate(n6): x,y,z=getxyz(i,n4,nodes) nodes[n-1]=(n,x,y,z) if verbose: print "Reading geometry from poly file..." m=Mesh() m.nodes=getnodes(filename+".node") m.elements,m.regions, lin=getele(filename+".ele") if not lin: #tetgen doesn't compute xyz coordinates of the aditional 6 nodes #(only of the 4 corner nodes) in tetrahedra. 
calculatexyz(m.nodes,m.elements) m.faces=getBCfaces(filename+".face") return m def main(): mesh = gen_block_mesh(nm.array((1.0, 2.0, 3.0)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), name='') mesh.write('0.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=False, open_angle = 0.0, name='') mesh.write('1.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.0, name='') mesh.write('2.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, name='') mesh.write('3.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 2.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=False, open_angle = 0.0, name='') mesh.write('4.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, name='') mesh.write('5.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, non_uniform=True, name='') mesh.write('6.mesh', io = 'auto') mesh = gen_cylinder_mesh(nm.array((0.5, 0.5, 1.0, 2.0, 3)), nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)), is_open=True, open_angle = 0.5, non_uniform=True, name='') mesh.write('7.mesh', io = 'auto') if __name__ == '__main__': main()
RexFuzzle/sfepy
sfepy/mesh/mesh_generators.py
Python
bsd-3-clause
35,893
[ "ParaView", "VTK" ]
c0e94ee428f35dbdbc93f88e99d327895fb9abbb6eb751542ef28ccb04b4bf8a
#!/usr/bin/env python2.7 """ @author Frank Austin Nothaft fnothaft@berkeley.edu @date 12/30/2015 Pipeline to go from FASTQ to VCF using both the ADAM+HaplotypeCaller pipeline as well as the GATK best practices pipeline. 0 --> ... --> 4 --> 5 | |++(6) | 7 --> 9 --> ... --> 12 --> 13 --> ... --> 17 | ++(8) | | 18 | / \ | 19 20 | / \ | 21 22 | | + --> 23 --> ... --> 34 --> 35 --> ... --> 39 | 40 / \ 41 42 / \ 43 44 BWA alignment 0 bwa alignment to a reference 1 samtools sam to bam conversion (no sort) 2 Fix header 3 Add read groups 4 Upload to S3 ADAM preprocessing 5 Start master 6 Master Service 7 Start Workers 8 Worker Service 9 Download Data 10 ADAM Convert 11 ADAM Transform 12 Upload Data GATK haplotype caller 13 Start GATK box 14 Download reference 15 Index reference 16 Build reference dictionary 17 Index samples 18 Run HaplotypeCaller 19 Run VQSR on SNPs 20 Run VQSR on INDELs 21 Apply VQSR model to SNPs 22 Apply VQSR model to INDELs GATK preprocessing 23 Download shared data 24 Reference preprocessing 25 Download sample 26 Index 27 Sort 28 Mark duplicates 29 Index 30 Realigner target 31 Indel realignment 32 Index 33 Base recalibration 34 Output BQSR file GATK haplotype caller 35 Start GATK box 36 Download reference 37 Index reference 38 Build reference dictionary 39 Index samples 40 Run HaplotypeCaller 41 Run VQSR on SNPs 42 Run VQSR on INDELs 43 Apply VQSR model to SNPs 44 Apply VQSR model to INDELs However, the pipeline in this file is actually just five encapsulated jobs: A / \ B D | | C E A Run BWA alignment (jobs 0-4) B Run ADAM preprocessing (jobs 5-12) C Run GATK haplotype caller (jobs 13-22) D Run GATK preprocessing (jobs 23-34) E Run GATK haplotype caller (jobs 35-44) =================================================================== :Dependencies: curl - apt-get install curl Toil - pip install --pre toil Docker - http://docs.docker.com/engine/installation/ Optional: S3AM - pip install --s3am (requires ~/.boto config file) """ # import from python 
system libraries import argparse import copy import textwrap from multiprocessing import cpu_count import yaml # import toil features from toil.job import Job # these don't seem necessary! but, must be imported here due to a serialization issue from toil_lib.spark import spawn_spark_cluster from toil_lib.programs import mock_mode # import job steps from other toil pipelines from toil_scripts.adam_pipeline.adam_preprocessing import * #static_adam_preprocessing_dag from toil_scripts.bwa_alignment.bwa_alignment import * #download_shared_files from toil_scripts.gatk_germline.germline import * #run_gatk_germline_pipeline from toil_lib.files import generate_file def sample_loop(job, uuid_list, inputs): """ Loops over the sample_ids (uuids) in the manifest, creating child jobs to process each """ for uuid_rg in uuid_list: uuid_items = uuid_rg.split(',') uuid = uuid_items[0] rg_line = None if len(uuid_items) > 1: rg_line = uuid_items[1] job.addChildJobFn(static_dag, uuid, rg_line, inputs) def static_dag(job, uuid, rg_line, inputs): """ Prefer this here as it allows us to pull the job functions from other jobs without rewrapping the job functions back together. bwa_inputs: Input arguments to be passed to BWA. adam_inputs: Input arguments to be passed to ADAM. gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing. gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing. gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing. 
""" # get work directory work_dir = job.fileStore.getLocalTempDir() inputs.cpu_count = cpu_count() inputs.maxCores = sys.maxint args = {'uuid': uuid, 's3_bucket': inputs.s3_bucket, 'sequence_dir': inputs.sequence_dir, 'dir_suffix': inputs.dir_suffix} # get head BWA alignment job function and encapsulate it inputs.rg_line = rg_line inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args) bwa = job.wrapJobFn(download_reference_files, inputs, [[uuid, ['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args), 's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate() # get head ADAM preprocessing job function and encapsulate it adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag, inputs, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args), suffix='.adam').encapsulate() # Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps. gatk_preprocessing_inputs = copy.deepcopy(inputs) gatk_preprocessing_inputs.suffix = '.gatk' gatk_preprocessing_inputs.preprocess = True gatk_preprocessing_inputs.preprocess_only = True gatk_preprocessing_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK preprocessing job function and encapsulate it gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), None, # Does not require second URL or RG_Line None), gatk_preprocessing_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed ADAM BAM file. 
adam_call_inputs = inputs adam_call_inputs.suffix = '.adam' adam_call_inputs.sorted = True adam_call_inputs.preprocess = False adam_call_inputs.run_vqsr = False adam_call_inputs.joint_genotype = False adam_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args), None, None), adam_call_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed GATK BAM file. gatk_call_inputs = copy.deepcopy(inputs) gatk_call_inputs.sorted = True gatk_call_inputs.preprocess = False gatk_call_inputs.run_vqsr = False gatk_call_inputs.joint_genotype = False gatk_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args), None, None), gatk_call_inputs).encapsulate() # wire up dag if not inputs.skip_alignment: job.addChild(bwa) if (inputs.pipeline_to_run == "adam" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_adam_call) else: if inputs.skip_alignment: job.addChild(adam_preprocess) else: bwa.addChild(adam_preprocess) adam_preprocess.addChild(gatk_adam_call) if (inputs.pipeline_to_run == "gatk" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_gatk_call) else: if inputs.skip_alignment: job.addChild(gatk_preprocess) else: bwa.addChild(gatk_preprocess) gatk_preprocess.addChild(gatk_gatk_call) def generate_mock_config(): return textwrap.dedent(""" # ADAM/GATK Pipeline configuration file # This configuration file is formatted in YAML. 
Simply write the value (at least one space) after the colon. # Edit the values in this configuration file and then rerun the pipeline. # This configuration file is pre-filled for use in MOCK MODE ############################################################################################################## # MOCK INPUTS pipeline-to-run: both skip-alignment: False skip-preprocessing: False sequence-dir: sequence autoscale-cluster: False s3-bucket: adam-gatk-pipeline-mock-files cpu-count: program-unit: 12345 platform: ILLUMINA ref: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa amb: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.amb ann: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.ann bwt: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.bwt pac: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.pac sa: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.sa fai: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.fai alt: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_ref.fa.alt phase: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_phase.vcf mills: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_mills.vcf dbsnp: s3://adam-gatk-pipeline-mock-files/mock-pipeline-inputs/bqsr1.vcf omni: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_omni.vcf hapmap: https://s3-us-west-2.amazonaws.com/adam-gatk-pipeline-mock-files/mock-pipeline-inputs/mock_hapmap.vcf trim-adapters: False file-size: 10M s3-bucket: adam-gatk-pipeline-mock-files memory: 2 dir-suffix: /mock 
num-nodes: #3 master-ip: spark-master ssec: """[1:]) def generate_config(): if mock_mode(): return generate_mock_config() return textwrap.dedent(""" # ADAM/GATK Pipeline configuration file # This configuration file is formatted in YAML. Simply write the value (at least one space) after the colon. # Edit the values in this configuration file and then rerun the pipeline # Comments (beginning with #) do not need to be removed. Optional parameters may be left blank. ############################################################################################################## pipeline-to-run: both # skip-alignment: False # skip-preprocessing: False # sequence-dir: sequence # autoscale-cluster: False # s3-bucket: # S3 Bucket URI cpu-count: # Optional: program-unit: 12345 # platform: ILLUMINA # ref: # Required: Reference fasta file amb: # Required: Reference fasta file (amb) ann: # Required: Reference fasta file (ann) bwt: # Required: Reference fasta file (bwt) pac: # Required: Reference fasta file (pac) sa: # Required: Reference fasta file (sa) fai: # Required: Reference fasta file (fai) alt: # Optional: Alternate file for reference build (alt). Necessary for alt aware alignment. phase: # Required: URL (1000G_phase1.indels.hg19.sites.fixed.vcf) mills: # Required: URL (Mills_and_1000G_gold_standard.indels.hg19.sites.vcf) dbsnp: # Required: URL (dbsnp_132_b37.leftAligned.vcf) hapmap: # Required: URL (hapmap_3.3.b37.vcf) omni: # Required: URL (1000G_omni.5.b37.vcf) trim-adapters: False # Trim adapters. num-nodes: 9 # Number of nodes to use. Do not set if providing master_ip. master-ip: # Optional: IP or hostname of host running for Spark master and HDFS namenode. # Should be provided instead of num-nodes if pointing at a static (external or # standalone) Spark cluster. The special value 'auto' indicates the master of # an externally autoscaled cgcloud spark cluster, i.e. one that is managed by # the uberscript. file-size: 100G # Approximate input file size. 
Should be given as %d[TGMK], e.g., # for a 100 gigabyte file, use file_size: '100G' ssec: # Optional: (string) Path to Key File for SSE-C Encryption dir-suffix: # Optional: suffix to add to output directory names. memory: # Required: Amount of available memory on each worker node. """[1:]) def generate_mock_manifest(): return textwrap.dedent(""" # This manifest was generated for use in MOCK MODE mouse_chrM_a mouse_chrM_b """[1:]) def generate_manifest(): if mock_mode(): return generate_mock_manifest() return textwrap.dedent(""" # Edit this manifest to include information pertaining to each sample to be run. # There is a single column: UUID # # UUID This should be a unique identifier for the sample to be processed that corresponds to # the prefix of the filenames of the input fastq files. # # Example: # If your input fastq file pairs were input_file_name_1.illumina_1.fastq.gz, input_file_name_1.illumina_2.fastq.gz and # input_file_name_2.illumina_1.fastq.gz, input_file_name_2.illumina_2.fastq.gz, the manifest would be: # # input_file_name_1.illumina # input_file_name_2.illumina # # Input fastq files MUST be named according to the filename_1.fastq.gz, filename_2.fastq.gz convention # # Place your samples below, one per line. """[1:]) def main(): """ This is a Toil pipeline used to perform alignment of fastqs. """ # Define Parser object and add to Toil if mock_mode(): usage_msg = 'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline ' \ 'will run in mock mode. 
To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0' else: usage_msg = None parser = argparse.ArgumentParser(usage=usage_msg) subparsers = parser.add_subparsers(dest='command') subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the ADAM/GATK pipeline') default_config = 'adam-gatk-mock.config' if mock_mode() else 'adam-gatk.config' default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode() else 'adam-gatk-manifest.csv' parser_run.add_argument('--config', default=default_config, type=str, help='Path to the (filled in) config file, generated with "generate-config".') parser_run.add_argument('--manifest', default=default_manifest, type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". ' '\nDefault value: "%(default)s".') Job.Runner.addToilOptions(parser_run) args = parser.parse_args() cwd = os.getcwd() if args.command == 'generate-config' or args.command == 'generate': generate_file(os.path.join(cwd, default_config), generate_config) if args.command == 'generate-manifest' or args.command == 'generate': generate_file(os.path.join(cwd, default_manifest), generate_manifest) # Pipeline execution elif args.command == 'run': require(os.path.exists(args.config), '{} not found. Please run ' 'generate-config'.format(args.config)) if not hasattr(args, 'sample'): require(os.path.exists(args.manifest), '{} not found and no samples provided. 
Please ' 'run "generate-manifest"'.format(args.manifest)) # Parse config parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()} inputs = argparse.Namespace(**parsed_config) # Parse manifest file uuid_list = [] with open(args.manifest) as f_manifest: for line in f_manifest: if not line.isspace() and not line.startswith('#'): uuid_list.append(line.strip()) inputs.sort = False if not inputs.dir_suffix: inputs.dir_suffix = '' if not inputs.s3_bucket: inputs.s3_bucket = '' if inputs.master_ip and inputs.num_nodes: raise ValueError("Exactly one of master_ip (%s) and num_nodes (%d) must be provided." % (inputs.master_ip, inputs.num_nodes)) if not hasattr(inputs, 'master_ip') and inputs.num_nodes <= 1: raise ValueError('num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater ' 'than 1. %d was passed.' % inputs.num_nodes) if (inputs.pipeline_to_run != "adam" and inputs.pipeline_to_run != "gatk" and inputs.pipeline_to_run != "both"): raise ValueError("pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed." % inputs.pipeline_to_run) Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs), args) if __name__=="__main__": main()
jpfeil/toil-scripts
src/toil_scripts/adam_gatk_pipeline/align_and_call.py
Python
apache-2.0
20,325
[ "BWA" ]
7885da6e18c43cc6baf5fcab64c1b1d113ba2ed077077f59c573512030e0b015
# -*- coding: utf-8 -*- """ ASSET is a statistical method :cite:`asset-Torre16_e1004939` for the detection of repeating sequences of synchronous spiking events in parallel spike trains. ASSET analysis class object of finding patterns ----------------------------------------------- .. autosummary:: :toctree: toctree/asset/ ASSET Patterns post-exploration ------------------------- .. autosummary:: :toctree: toctree/asset/ synchronous_events_intersection synchronous_events_difference synchronous_events_identical synchronous_events_no_overlap synchronous_events_contained_in synchronous_events_contains_all synchronous_events_overlap Tutorial -------- :doc:`View tutorial <../tutorials/asset>` Run tutorial interactively: .. image:: https://mybinder.org/badge.svg :target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master ?filepath=doc/tutorials/asset.ipynb Examples -------- 0) Create `ASSET` class object that holds spike trains. `ASSET` requires at least one argument - a list of spike trains. If `spiketrains_y` is not provided, the same spike trains are used to build an intersection matrix with. >>> import neo >>> import numpy as np >>> import quantities as pq >>> from elephant import asset >>> spiketrains = [ ... neo.SpikeTrain([start, start + 6] * (3 * pq.ms) + 10 * pq.ms, ... t_stop=60 * pq.ms) ... for _ in range(3) ... for start in range(3) ... ] >>> asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms, verbose=False) 1) Build the intersection matrix `imat`: >>> imat = asset_obj.intersection_matrix() 2) Estimate the probability matrix `pmat`, using the analytical method: >>> pmat = asset_obj.probability_matrix_analytical(imat, ... kernel_width=9*pq.ms) 3) Compute the joint probability matrix `jmat`, using a suitable filter: >>> jmat = asset_obj.joint_probability_matrix(pmat, filter_shape=(5, 1), ... 
n_largest=3) 4) Create the masked version of the intersection matrix, `mmat`, from `pmat` and `jmat`: >>> mmat = asset_obj.mask_matrices([pmat, jmat], thresholds=.9) 5) Cluster significant elements of imat into diagonal structures: >>> cmat = asset_obj.cluster_matrix_entries(mmat, max_distance=3, ... min_neighbors=3, stretch=5) 6) Extract sequences of synchronous events: >>> sses = asset_obj.extract_synchronous_events(cmat) The ASSET found 2 sequences of synchronous events: >>> from pprint import pprint >>> pprint(sses) {1: {(9, 3): {0, 3, 6}, (10, 4): {1, 4, 7}, (11, 5): {8, 2, 5}}} """ from __future__ import division, print_function, unicode_literals import warnings import neo import numpy as np import quantities as pq import scipy.spatial import scipy.stats from sklearn.cluster import dbscan from tqdm import trange, tqdm import elephant.conversion as conv from elephant import spike_train_surrogates try: from mpi4py import MPI mpi_accelerated = True comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() except ImportError: mpi_accelerated = False size = 1 rank = 0 __all__ = [ "ASSET", "synchronous_events_intersection", "synchronous_events_difference", "synchronous_events_identical", "synchronous_events_no_overlap", "synchronous_events_contained_in", "synchronous_events_contains_all", "synchronous_events_overlap" ] # ============================================================================= # Some Utility Functions to be dealt with in some way or another # ============================================================================= def _signals_same_attribute(signals, attr_name): """ Check whether a list of signals (`neo.AnalogSignal` or `neo.SpikeTrain`) have same attribute `attr_name`. If so, return that value. Otherwise, raise ValueError. Parameters ---------- signals : list A list of signals (e.g. `neo.AnalogSignal` or `neo.SpikeTrain`) having attribute `attr_name`. 
Returns ------- pq.Quantity The value of the common attribute `attr_name` of the list of signals. Raises ------ ValueError If `signals` is an empty list. If `signals` have different `attr_name` attribute values. """ if len(signals) == 0: raise ValueError('Empty signals list') attribute = getattr(signals[0], attr_name) for sig in signals[1:]: if getattr(sig, attr_name) != attribute: raise ValueError( "Signals have different '{}' values".format(attr_name)) return attribute def _quantities_almost_equal(x, y): """ Returns True if two quantities are almost equal, i.e., if `x - y` is "very close to 0" (not larger than machine precision for floats). Parameters ---------- x : pq.Quantity First Quantity to compare. y : pq.Quantity Second Quantity to compare. Must have same unit type as `x`, but not necessarily the same shape. Any shapes of `x` and `y` for which `x - y` can be calculated are permitted. Returns ------- np.ndarray Array of `bool`, which is True at any position where `x - y` is almost zero. Notes ----- Not the same as `numpy.testing.assert_allclose` (which does not work with Quantities) and `numpy.testing.assert_almost_equal` (which works only with decimals) """ eps = np.finfo(float).eps relative_diff = (x - y).magnitude return np.all([-eps <= relative_diff, relative_diff <= eps], axis=0) def _transactions(spiketrains, bin_size, t_start, t_stop, ids=None): """ Transform parallel spike trains into a list of sublists, called transactions, each corresponding to a time bin and containing the list of spikes in `spiketrains` falling into that bin. To compute each transaction, the spike trains are binned (with adjacent exclusive binning) and clipped (i.e., spikes from the same train falling in the same bin are counted as one event). The list of spike IDs within each bin form the corresponding transaction. 
Parameters ---------- spiketrains : list of neo.SpikeTrain or list of tuple A list of `neo.SpikeTrain` objects, or list of pairs (Train_ID, `neo.SpikeTrain`), where `Train_ID` can be any hashable object. bin_size : pq.Quantity Width of each time bin. Time is binned to determine synchrony. t_start : pq.Quantity The starting time. Only spikes occurring at times `t >= t_start` are considered. The first transaction contains spikes falling into the time segment `[t_start, t_start+bin_size]`. If None, takes the value of `spiketrain.t_start`, common for all input `spiketrains` (raises ValueError if it's not the case). Default: None. t_stop : pq.Quantity The ending time. Only spikes occurring at times `t < t_stop` are considered. If None, takes the value of `spiketrain.t_stop`, common for all input `spiketrains` (raises ValueError if it's not the case). Default: None. ids : list of int, optional List of spike train IDs. If None, the IDs `0` to `N-1` are used, where `N` is the number of input spike trains. Default: None. Returns ------- list of list A list of transactions, where each transaction corresponds to a time bin and represents the list of spike train IDs having a spike in that time bin. Raises ------ TypeError If `spiketrains` is not a list of `neo.SpikeTrain` or a list of tuples (id, `neo.SpikeTrain`). 
""" if all(isinstance(st, neo.SpikeTrain) for st in spiketrains): trains = spiketrains if ids is None: ids = range(len(spiketrains)) else: # (id, SpikeTrain) pairs try: ids, trains = zip(*spiketrains) except TypeError: raise TypeError('spiketrains must be either a list of ' + 'SpikeTrains or a list of (id, SpikeTrain) pairs') # Bin the spike trains and take for each of them the ids of filled bins binned = conv.BinnedSpikeTrain( trains, bin_size=bin_size, t_start=t_start, t_stop=t_stop) filled_bins = binned.spike_indices # Compute and return the transaction list return [[train_id for train_id, b in zip(ids, filled_bins) if bin_id in b] for bin_id in range(binned.n_bins)] def _analog_signal_step_interp(signal, times): """ Compute the step-wise interpolation of a signal at desired times. Given a signal (e.g. a `neo.AnalogSignal`) `s` taking values `s[t0]` and `s[t1]` at two consecutive time points `t0` and `t1` (`t0 < t1`), the value of the step-wise interpolation at time `t: t0 <= t < t1` is given by `s[t] = s[t0]`. Parameters ---------- signal : neo.AnalogSignal The analog signal, containing the discretization of the function to interpolate. times : pq.Quantity A vector of time points at which the step interpolation is computed. Returns ------- pq.Quantity Object with same shape of `times` and containing the values of the interpolated signal at the time points in `times`. 
""" dt = signal.sampling_period # Compute the ids of the signal times to the left of each time in times time_ids = np.floor( ((times - signal.t_start) / dt).rescale( pq.dimensionless).magnitude).astype('i') return (signal.magnitude[time_ids] * signal.units).rescale(signal.units) # ============================================================================= # HERE ASSET STARTS # ============================================================================= def _stretched_metric_2d(x, y, stretch, ref_angle): r""" Given a list of points on the real plane, identified by their abscissa `x` and ordinate `y`, compute a stretched transformation of the Euclidean distance among each of them. The classical euclidean distance `d` between points `(x1, y1)` and `(x2, y2)`, i.e., :math:`\sqrt((x1-x2)^2 + (y1-y2)^2)`, is multiplied by a factor .. math:: 1 + (stretch - 1.) * \abs(\sin(ref_angle - \theta)), where :math:`\theta` is the angle between the points and the 45 degree direction (i.e., the line `y = x`). The stretching factor thus steadily varies between 1 (if the line connecting `(x1, y1)` and `(x2, y2)` has inclination `ref_angle`) and `stretch` (if that line has inclination `90 + ref_angle`). Parameters ---------- x : (n,) np.ndarray Array of abscissas of all points among which to compute the distance. y : (n,) np.ndarray Array of ordinates of all points among which to compute the distance (same shape as `x`). stretch : float Maximum stretching factor, applied if the line connecting the points has inclination `90 + ref_angle`. ref_angle : float Reference angle in degrees (i.e., the inclination along which the stretching factor is 1). Returns ------- D : (n,n) np.ndarray Square matrix of distances between all pairs of points. 
""" alpha = np.deg2rad(ref_angle) # reference angle in radians # Create the array of points (one per row) for which to compute the # stretched distance points = np.vstack([x, y]).T # Compute the matrix D[i, j] of euclidean distances among points i and j D = scipy.spatial.distance_matrix(points, points) # Compute the angular coefficients of the line between each pair of points x_array = np.tile(x, reps=(len(x), 1)) y_array = np.tile(y, reps=(len(y), 1)) dX = x_array.T - x_array # dX[i,j]: x difference between points i and j dY = y_array.T - y_array # dY[i,j]: y difference between points i and j # Compute the matrix Theta of angles between each pair of points theta = np.arctan2(dY, dX) # Transform [-pi, pi] back to [-pi/2, pi/2] theta[theta < -np.pi / 2] += np.pi theta[theta > np.pi / 2] -= np.pi # Compute the matrix of stretching factors for each pair of points stretch_mat = 1 + (stretch - 1.) * np.abs(np.sin(alpha - theta)) # Return the stretched distance matrix return D * stretch_mat def _interpolate_signals(signals, sampling_times, verbose=False): """ Interpolate signals at given sampling times. """ # Reshape all signals to one-dimensional array object (e.g. AnalogSignal) for i, signal in enumerate(signals): if signal.ndim == 2: signals[i] = signal.flatten() elif signal.ndim > 2: raise ValueError('elements in fir_rates must have 2 dimensions') if verbose: print('create time slices of the rates...') # Interpolate in the time bins interpolated_signal = np.vstack([_analog_signal_step_interp( signal, sampling_times).rescale('Hz').magnitude for signal in signals]) * pq.Hz return interpolated_signal def _num_iterations(n, d): if d > n: return 0 if d == 1: return n if d == 2: # equivalent to np.sum(count_matrix) return n * (n + 1) // 2 - 1 # Create square matrix with diagonal values equal to 2 to `n`. # Start from row/column with index == 2 to facilitate indexing. 
count_matrix = np.zeros((n + 1, n + 1), dtype=int) np.fill_diagonal(count_matrix, np.arange(n + 1)) count_matrix[1, 1] = 0 # Accumulate counts of all the iterations where the first index # is in the interval `d` to `n`. # # The counts for every level is obtained by accumulating the # `count_matrix`, which is the count of iterations with the first # index between `d` and `n`, when `d` == 2. # # For every value from 3 to `d`... # 1. Define each row `n` in the count matrix as the sum of all rows # equal or above. # 2. Set all rows above the current value of `d` with zeros. # # Example for `n` = 6 and `d` = 4: # # d = 2 (start) d = 3 # count count # n n # 2 2 0 0 0 0 # 3 0 3 0 0 0 ==> 3 2 3 0 0 0 ==> # 4 0 0 4 0 0 4 2 3 4 0 0 # 5 0 0 0 5 0 5 2 3 4 5 0 # 6 0 0 0 0 6 6 2 3 4 5 6 # # d = 4 # count # n # # 4 4 6 4 0 0 # 5 6 9 8 5 0 # 6 8 12 12 10 6 # # The total number is the sum of the `count_matrix` when `d` has # the value passed to the function. # for cur_d in range(3, d + 1): for cur_n in range(n, 2, -1): count_matrix[cur_n, :] = np.sum(count_matrix[:cur_n + 1, :], axis=0) # Set previous `d` level to zeros count_matrix[cur_d - 1, :] = 0 return np.sum(count_matrix) def _combinations_with_replacement(n, d): # Generate sequences of {a_i} such that # a_0 >= a_1 >= ... >= a_(d-1) and # d-i <= a_i <= n, for each i in [0, d-1]. # # Almost equivalent to # list(itertools.combinations_with_replacement(range(n, 0, -1), r=d))[::-1] # # Example: # _combinations_with_replacement(n=13, d=3) --> # (3, 2, 1), (3, 2, 2), (3, 3, 1), ... , (13, 13, 12), (13, 13, 13). # # The implementation follows the insertion sort algorithm: # insert a new element a_i from right to left to keep the reverse sorted # order. Now substitute increment operation for insert. 
    if d > n:
        # No valid sequence exists: more slots than admissible values.
        return
    if d == 1:
        for matrix_entry in range(1, n + 1):
            yield (matrix_entry,)
        return

    # Start from the lexicographically smallest admissible sequence
    # (d, d-1, ..., 1) and advance it like an odometer with the
    # reverse-sorted constraint.
    sequence_sorted = list(range(d, 0, -1))
    input_order = tuple(sequence_sorted)  # fixed
    while sequence_sorted[0] != n + 1:
        # Enumerate all admissible values for the last position.
        for last_element in range(1, sequence_sorted[-2] + 1):
            sequence_sorted[-1] = last_element
            yield tuple(sequence_sorted)
        # Find the rightmost position (excluding the last) that can be
        # incremented without breaking the reverse-sorted order.
        increment_id = d - 2
        while increment_id > 0 and sequence_sorted[increment_id - 1] == \
                sequence_sorted[increment_id]:
            increment_id -= 1
        # Reset the suffix to its initial values and increment the pivot.
        sequence_sorted[increment_id + 1:] = input_order[increment_id + 1:]
        sequence_sorted[increment_id] += 1


def _jsf_uniform_orderstat_3d(u, n, verbose=False):
    r"""
    Considered n independent random variables X1, X2, ..., Xn all having
    uniform distribution in the interval (0, 1):

    .. centered::  Xi ~ Uniform(0, 1),

    given a 2D matrix U = (u_ij) where each U_i is an array of length d:
    U_i = [u0, u1, ..., u_{d-1}] of quantiles, with u1 <= u2 <= ... <= un,
    computes the joint survival function (jsf) of the d highest order
    statistics (U_{n-d+1}, U_{n-d+2}, ..., U_n), where U_k := "k-th highest
    X's" at each u_i, i.e.:

    .. centered::  jsf(u_i) = Prob(U_{n-k} >= u_ijk, k=0,1,..., d-1).

    Parameters
    ----------
    u : (A,d) np.ndarray
        2D matrix of floats between 0 and 1.
        Each row `u_i` is an array of length `d`, considered a set of
        `d` largest order statistics extracted from a sample of `n` random
        variables whose cdf is `F(x) = x` for each `x`.
        The routine computes the joint cumulative probability of the `d`
        values in `u_ij`, for each `i` and `j`.
    n : int
        Size of the sample where the `d` largest order statistics `u_ij`
        are assumed to have been sampled from.
    verbose : bool
        If True, print messages during the computation.
        Default: False.

    Returns
    -------
    P_total : (A,) np.ndarray
        Matrix of joint survival probabilities. `s_ij` is the joint survival
        probability of the values `{u_ijk, k=0, ..., d-1}`.
        Note: the joint probability matrix computed for the ASSET analysis
        is `1 - S`.
    """
    num_p_vals, d = u.shape

    # Define ranges [1,...,n], [2,...,n], ..., [d,...,n] for the mute variables
    # used to compute the integral as a sum over all possibilities
    it_todo = _num_iterations(n, d)

    log_1 = np.log(1.)

    # Compute the log of the integral's coefficient
    logK = np.sum(np.log(np.arange(1, n + 1)))

    # Add to the 3D matrix u a bottom layer equal to 0 and a
    # top layer equal to 1. Then compute the difference du along
    # the first dimension.
    du = np.diff(u, prepend=0, append=1, axis=1)

    # precompute logarithms
    # ignore warnings about infinities, see inside the loop:
    # we replace 0 * ln(0) by 1 to get exp(0 * ln(0)) = 0 ** 0 = 1
    # the remaining infinities correctly evaluate to
    # exp(ln(0)) = exp(-inf) = 0
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        log_du = np.log(du)

    # prepare arrays for usage inside the loop
    di_scratch = np.empty_like(du, dtype=np.int32)
    log_du_scratch = np.empty_like(log_du)

    # precompute log(factorial)s
    # pad with a zero to get 0!
= 1 log_factorial = np.hstack((0, np.cumsum(np.log(range(1, n + 1))))) # compute the probabilities for each unique row of du # only loop over the indices and do all du entries at once # using matrix algebra # initialise probabilities to 0 P_total = np.zeros(du.shape[0], dtype=np.float32) for iter_id, matrix_entries in enumerate( tqdm(_combinations_with_replacement(n, d=d), total=it_todo, desc="Joint survival function", disable=not verbose)): # if we are running with MPI if mpi_accelerated and iter_id % size != rank: continue # we only need the differences of the indices: di = -np.diff((n,) + matrix_entries + (0,)) # reshape the matrix to be compatible with du di_scratch[:, range(len(di))] = di # use precomputed factorials sum_log_di_factorial = log_factorial[di].sum() # Compute for each i,j the contribution to the probability # given by this step, and add it to the total probability # Use precomputed log np.copyto(log_du_scratch, log_du) # for each a=0,1,...,A-1 and b=0,1,...,B-1, replace du with 1 # whenever di_scratch = 0, so that du ** di_scratch = 1 (this avoids # nans when both du and di_scratch are 0, and is mathematically # correct) log_du_scratch[di_scratch == 0] = log_1 di_log_du = di_scratch * log_du_scratch sum_di_log_du = di_log_du.sum(axis=1) logP = sum_di_log_du - sum_log_di_factorial P_total += np.exp(logP + logK) if mpi_accelerated: totals = np.zeros(du.shape[0], dtype=np.float32) # exchange all the results comm.Allreduce( [P_total, MPI.FLOAT], [totals, MPI.FLOAT], op=MPI.SUM) # We need to return the collected totals instead of the local P_total return totals return P_total def _pmat_neighbors(mat, filter_shape, n_largest): """ Build the 3D matrix `L` of largest neighbors of elements in a 2D matrix `mat`. For each entry `mat[i, j]`, collects the `n_largest` elements with largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`, and assigns them to `L[i, j, :]`. 
The zone around `mat[i, j]` where largest neighbors are collected from is a rectangular area (kernel) of shape `(l, w) = filter_shape` centered around `mat[i, j]` and aligned along the diagonal. If `mat` is symmetric, only the triangle below the diagonal is considered. Parameters ---------- mat : np.ndarray A square matrix of real-valued elements. filter_shape : tuple of int A pair of integers representing the kernel shape `(l, w)`. n_largest : int The number of largest neighbors to collect for each entry in `mat`. Returns ------- lmat : np.ndarray A matrix of shape `(n_largest, l, w)` containing along the first dimension `lmat[:, i, j]` the largest neighbors of `mat[i, j]`. Raises ------ ValueError If `filter_shape[1]` is not lower than `filter_shape[0]`. Warns ----- UserWarning If both entries in `filter_shape` are not odd values (i.e., the kernel is not centered on the data point used in the calculation). """ l, w = filter_shape # if the matrix is symmetric the diagonal was set to 0.5 # when computing the probability matrix symmetric = np.all(np.diagonal(mat) == 0.5) # Check consistent arguments if w >= l: raise ValueError('filter_shape width must be lower than length') if not ((w % 2) and (l % 2)): warnings.warn('The kernel is not centered on the datapoint in whose' 'calculation it is used. 
Consider using odd values' 'for both entries of filter_shape.') # Construct the kernel filt = np.ones((l, l), dtype=np.float32) filt = np.triu(filt, -w) filt = np.tril(filt, w) # Convert mat values to floats, and replaces np.infs with specified input # values mat = np.array(mat, dtype=np.float32) # Initialize the matrix of d-largest values as a matrix of zeroes lmat = np.zeros((n_largest, mat.shape[0], mat.shape[1]), dtype=np.float32) N_bin_y = mat.shape[0] N_bin_x = mat.shape[1] # if the matrix is symmetric do not use kernel positions intersected # by the diagonal if symmetric: bin_range_y = range(l, N_bin_y - l + 1) else: bin_range_y = range(N_bin_y - l + 1) bin_range_x = range(N_bin_x - l + 1) # compute matrix of largest values for y in bin_range_y: if symmetric: # x range depends on y position bin_range_x = range(y - l + 1) for x in bin_range_x: patch = mat[y: y + l, x: x + l] mskd = np.multiply(filt, patch) largest_vals = np.sort(mskd, axis=None)[-n_largest:] lmat[:, y + (l // 2), x + (l // 2)] = largest_vals return lmat def synchronous_events_intersection(sse1, sse2, intersection='linkwise'): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of positions `(iK, jK)` of matrix entries and associated synchronous events `SK`, finds the intersection among them. The intersection can be performed 'pixelwise' or 'linkwise'. * if 'pixelwise', it yields a new SSE which retains only events in `sse1` whose pixel position matches a pixel position in `sse2`. This operation is not symmetric: `intersection(sse1, sse2) != intersection(sse2, sse1)`. * if 'linkwise', an additional step is performed where each retained synchronous event `SK` in `sse1` is intersected with the corresponding event in `sse2`. This yields a symmetric operation: `intersection(sse1, sse2) = intersection(sse2, sse1)`. Both `sse1` and `sse2` must be provided as dictionaries of the type .. 
centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Each is a dictionary of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values (see above). intersection : {'pixelwise', 'linkwise'}, optional The type of intersection to perform among the two SSEs (see above). Default: 'linkwise'. Returns ------- sse_new : dict A new SSE (same structure as `sse1` and `sse2`) which retains only the events of `sse1` associated to keys present both in `sse1` and `sse2`. If `intersection = 'linkwise'`, such events are additionally intersected with the associated events in `sse2`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ sse_new = sse1.copy() for pixel1 in sse1.keys(): if pixel1 not in sse2.keys(): del sse_new[pixel1] if intersection == 'linkwise': for pixel1, link1 in sse_new.items(): sse_new[pixel1] = link1.intersection(sse2[pixel1]) if len(sse_new[pixel1]) == 0: del sse_new[pixel1] elif intersection == 'pixelwise': pass else: raise ValueError( "intersection (=%s) can only be" % intersection + " 'pixelwise' or 'linkwise'") return sse_new def synchronous_events_difference(sse1, sse2, difference='linkwise'): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), computes the difference between `sse1` and `sse2`. The difference can be performed 'pixelwise' or 'linkwise': * if 'pixelwise', it yields a new SSE which contains all (and only) the events in `sse1` whose pixel position doesn't match any pixel in `sse2`. * if 'linkwise', for each pixel `(i, j)` in `sse1` and corresponding synchronous event `S1`, if `(i, j)` is a pixel in `sse2` corresponding to the event `S2`, it retains the set difference `S1 - S2`. If `(i, j)` is not a pixel in `sse2`, it retains the full set `S1`. 
Note that in either case the difference is a non-symmetric operation: `intersection(sse1, sse2) != intersection(sse2, sse1)`. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values (see above). difference : {'pixelwise', 'linkwise'}, optional The type of difference to perform between `sse1` and `sse2` (see above). Default: 'linkwise'. Returns ------- sse_new : dict A new SSE (same structure as `sse1` and `sse2`) which retains the difference between `sse1` and `sse2`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ sse_new = sse1.copy() for pixel1 in sse1.keys(): if pixel1 in sse2.keys(): if difference == 'pixelwise': del sse_new[pixel1] elif difference == 'linkwise': sse_new[pixel1] = sse_new[pixel1].difference(sse2[pixel1]) if len(sse_new[pixel1]) == 0: del sse_new[pixel1] else: raise ValueError( "difference (=%s) can only be" % difference + " 'pixelwise' or 'linkwise'") return sse_new def _remove_empty_events(sse): """ Given a sequence of synchronous events (SSE) `sse` consisting of a pool of pixel positions and associated synchronous events (see below), returns a copy of `sse` where all empty events have been removed. `sse` must be provided as a dictionary of type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse : dict A dictionary of pixel positions `(i, j)` as keys, and sets `S` of synchronous events as values (see above). Returns ------- sse_new : dict A copy of `sse` where all empty events have been removed. 
""" sse_new = sse.copy() for pixel, link in sse.items(): if link == set([]): del sse_new[pixel] return sse_new def synchronous_events_identical(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether `sse1` is strictly contained in `sse2`. `sse1` is strictly contained in `sse2` if all its pixels are pixels of `sse2`, if its associated events are subsets of the corresponding events in `sse2`, and if `sse2` contains events, or neuron IDs in some event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not identical). Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values. Returns ------- bool True if `sse1` is identical to `sse2`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ # Remove empty links from sse11 and sse22, if any sse11 = _remove_empty_events(sse1) sse22 = _remove_empty_events(sse2) # Return whether sse11 == sse22 return sse11 == sse22 def synchronous_events_no_overlap(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether `sse1` and `sse2` are disjoint. Two SSEs are disjoint if they don't share pixels, or if the events associated to common pixels are disjoint. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. 
    Parameters
    ----------
    sse1, sse2 : dict
        Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
        synchronous events as values.

    Returns
    -------
    bool
        True if `sse1` is disjoint from `sse2`.

    See Also
    --------
    ASSET.extract_synchronous_events : extract SSEs from given spike trains

    """
    # Remove empty links from sse11 and sse22, if any
    sse11 = _remove_empty_events(sse1)
    sse22 = _remove_empty_events(sse2)

    # If both SSEs are empty, return False (we consider them equal)
    if sse11 == {} and sse22 == {}:
        return False

    common_pixels = set(sse11.keys()).intersection(set(sse22.keys()))
    if common_pixels == set([]):
        # No shared pixel: trivially disjoint
        return True
    elif all(sse11[p].isdisjoint(sse22[p]) for p in common_pixels):
        # Shared pixels exist, but their associated events never overlap
        return True
    else:
        return False


def synchronous_events_contained_in(sse1, sse2):
    """
    Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
    consisting of a pool of pixel positions and associated synchronous events
    (see below), determines whether `sse1` is strictly contained in `sse2`.

    `sse1` is strictly contained in `sse2` if all its pixels are pixels of
    `sse2`, if its associated events are subsets of the corresponding events
    in `sse2`, and if `sse2` contains non-empty events, or neuron IDs in some
    event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not
    identical).

    Both `sse1` and `sse2` must be provided as dictionaries of the type

    .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},

    where each `i`, `j` is an integer and each `S` is a set of neuron IDs.

    Parameters
    ----------
    sse1, sse2 : dict
        Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
        synchronous events as values.

    Returns
    -------
    bool
        True if `sse1` is a subset of `sse2`.

    See Also
    --------
    ASSET.extract_synchronous_events : extract SSEs from given spike trains

    """
    # Remove empty links from sse11 and sse22, if any
    sse11 = _remove_empty_events(sse1)
    sse22 = _remove_empty_events(sse2)

    # Return False if sse11 and sse22 are identical: the containment must be
    # strict.  (The comment here used to say "disjoint", but the call below
    # tests identity, not disjointness.)
    if synchronous_events_identical(sse11, sse22):
        return False

    # Return False if any pixel in sse1 is not contained in sse2, or if any
    # link of sse1 is not a subset of the corresponding link in sse2.
    # Otherwise (if sse1 is a subset of sse2) continue
    for pixel1, link1 in sse11.items():
        if pixel1 not in sse22.keys():
            return False
        elif not link1.issubset(sse22[pixel1]):
            return False

    # Check that sse1 is a STRICT subset of sse2, i.e. that sse2 contains at
    # least one pixel or neuron id not present in sse1.
    return not synchronous_events_identical(sse11, sse22)


def synchronous_events_contains_all(sse1, sse2):
    """
    Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
    consisting of a pool of pixel positions and associated synchronous events
    (see below), determines whether `sse1` strictly contains `sse2`.

    `sse1` strictly contains `sse2` if it contains all pixels of `sse2`, if
    all associated events in `sse1` contain those in `sse2`, and if `sse1`
    additionally contains other pixels / events not contained in `sse2`.

    Both `sse1` and `sse2` must be provided as dictionaries of the type

    .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},

    where each `i`, `j` is an integer and each `S` is a set of neuron IDs.

    Parameters
    ----------
    sse1, sse2 : dict
        Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
        synchronous events as values.

    Returns
    -------
    bool
        True if `sse1` strictly contains `sse2`.

    Notes
    -----
    `synchronous_events_contains_all(sse1, sse2)` is identical to
    `synchronous_events_contained_in(sse2, sse1)`.
    See Also
    --------
    ASSET.extract_synchronous_events : extract SSEs from given spike trains

    """
    # Strict containment of sse2 in sse1 is the mirrored relation.
    return synchronous_events_contained_in(sse2, sse1)


def synchronous_events_overlap(sse1, sse2):
    """
    Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
    consisting of a pool of pixel positions and associated synchronous events
    (see below), determines whether the two SSEs overlap.

    The SSEs overlap if they are not equal and none of them is a superset of
    the other one but they are also not disjoint.

    Both `sse1` and `sse2` must be provided as dictionaries of the type

    .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},

    where each `i`, `j` is an integer and each `S` is a set of neuron IDs.

    Parameters
    ----------
    sse1, sse2 : dict
        Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
        synchronous events as values.

    Returns
    -------
    bool
        True if `sse1` and `sse2` overlap.

    See Also
    --------
    ASSET.extract_synchronous_events : extract SSEs from given spike trains

    """
    contained_in = synchronous_events_contained_in(sse1, sse2)
    contains_all = synchronous_events_contains_all(sse1, sse2)
    identical = synchronous_events_identical(sse1, sse2)
    is_disjoint = synchronous_events_no_overlap(sse1, sse2)
    # Overlap = none of the four mutually exclusive relations above holds.
    return not (contained_in or contains_all or identical or is_disjoint)


def _signals_t_start_stop(signals, t_start=None, t_stop=None):
    # Fill in missing t_start / t_stop from the attribute shared by all
    # `signals` (via _signals_same_attribute, defined elsewhere in this
    # module — presumably it raises if the attribute differs among the
    # signals; verify against its definition).
    if t_start is None:
        t_start = _signals_same_attribute(signals, 't_start')
    if t_stop is None:
        t_stop = _signals_same_attribute(signals, 't_stop')
    return t_start, t_stop


def _intersection_matrix(spiketrains, spiketrains_y, bin_size, t_start_x,
                         t_start_y, t_stop_x, t_stop_y, normalization=None):
    # Intersection matrix of two (possibly identical) spike train lists,
    # binned along the x and y time axes respectively.
    if spiketrains_y is None:
        spiketrains_y = spiketrains

    # Compute the binned spike train matrices, along both time axes
    spiketrains_binned = conv.BinnedSpikeTrain(
        spiketrains, bin_size=bin_size,
        t_start=t_start_x, t_stop=t_stop_x)
    spiketrains_binned_y = conv.BinnedSpikeTrain(
        spiketrains_y, bin_size=bin_size,
        t_start=t_start_y, t_stop=t_stop_y)
# Compute imat by matrix multiplication bsts_x = spiketrains_binned.sparse_matrix bsts_y = spiketrains_binned_y.sparse_matrix # Compute the number of spikes in each bin, for both time axes # 'A1' property returns self as a flattened ndarray. spikes_per_bin_x = bsts_x.sum(axis=0).A1 spikes_per_bin_y = bsts_y.sum(axis=0).A1 # Compute the intersection matrix imat imat = bsts_x.T.dot(bsts_y).toarray().astype(np.float32) for ii in range(bsts_x.shape[1]): # Normalize the row col_sum = bsts_x[:, ii].sum() if normalization is None or col_sum == 0: norm_coef = 1. elif normalization == 'intersection': norm_coef = np.minimum( spikes_per_bin_x[ii], spikes_per_bin_y) elif normalization == 'mean': # geometric mean norm_coef = np.sqrt( spikes_per_bin_x[ii] * spikes_per_bin_y) elif normalization == 'union': norm_coef = np.array([(bsts_x[:, ii] + bsts_y[:, jj]).count_nonzero() for jj in range(bsts_y.shape[1])]) else: raise ValueError( "Invalid parameter 'norm': {}".format(normalization)) # If normalization required, for each j such that bsts_y[j] is # identically 0 the code above sets imat[:, j] to identically nan. # Substitute 0s instead. imat[ii, :] = np.divide(imat[ii, :], norm_coef, out=np.zeros(imat.shape[1], dtype=np.float32), where=norm_coef != 0) # Return the intersection matrix and the edges of the bins used for the # x and y axes, respectively. return imat class ASSET(object): """ Analysis of Sequences of Synchronous EvenTs class. Parameters ---------- spiketrains_i, spiketrains_j : list of neo.SpikeTrain Input spike trains for the first and second time dimensions, respectively, to compute the p-values from. If `spiketrains_y` is None, it's set to `spiketrains`. bin_size : pq.Quantity, optional The width of the time bins used to compute the probability matrix. t_start_i, t_start_j : pq.Quantity, optional The start time of the binning for the first and second axes, respectively. 
If None, the attribute `t_start` of the spike trains is used (if the same for all spike trains). Default: None. t_stop_i, t_stop_j : pq.Quantity, optional The stop time of the binning for the first and second axes, respectively. If None, the attribute `t_stop` of the spike trains is used (if the same for all spike trains). Default: None. verbose : bool, optional If True, print messages and show progress bar. Default: True. Raises ------ ValueError If the `t_start` & `t_stop` times are not (one of): perfectly aligned; fully disjoint. """ def __init__(self, spiketrains_i, spiketrains_j=None, bin_size=3 * pq.ms, t_start_i=None, t_start_j=None, t_stop_i=None, t_stop_j=None, verbose=True): self.spiketrains_i = spiketrains_i if spiketrains_j is None: spiketrains_j = spiketrains_i self.spiketrains_j = spiketrains_j self.bin_size = bin_size self.t_start_i, self.t_stop_i = _signals_t_start_stop( spiketrains_i, t_start=t_start_i, t_stop=t_stop_i) self.t_start_j, self.t_stop_j = _signals_t_start_stop( spiketrains_j, t_start=t_start_j, t_stop=t_stop_j) self.verbose = verbose msg = 'The time intervals for x and y need to be either identical ' \ 'or fully disjoint, but they are:\n' \ 'x: ({}, {}) and y: ({}, {}).'.format(self.t_start_i, self.t_stop_i, self.t_start_j, self.t_stop_j) # the starts have to be perfectly aligned for the binning to work # the stops can differ without impacting the binning if self.t_start_i == self.t_start_j: if not _quantities_almost_equal(self.t_stop_i, self.t_stop_j): raise ValueError(msg) elif (self.t_start_i < self.t_start_j < self.t_stop_i) \ or (self.t_start_i < self.t_stop_j < self.t_stop_i): raise ValueError(msg) # Compute the binned spike train matrices, along both time axes self.spiketrains_binned_i = conv.BinnedSpikeTrain( self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i, t_stop=self.t_stop_i) self.spiketrains_binned_j = conv.BinnedSpikeTrain( self.spiketrains_j, bin_size=self.bin_size, t_start=self.t_start_j, 
t_stop=self.t_stop_j) @property def x_edges(self): """ A Quantity array of `n+1` edges of the bins used for the horizontal axis of the intersection matrix, where `n` is the number of bins that time was discretized in. """ return self.spiketrains_binned_i.bin_edges.rescale(self.bin_size.units) @property def y_edges(self): """ A Quantity array of `n+1` edges of the bins used for the vertical axis of the intersection matrix, where `n` is the number of bins that time was discretized in. """ return self.spiketrains_binned_j.bin_edges.rescale(self.bin_size.units) def is_symmetric(self): """ Returns ------- bool Whether the intersection matrix is symmetric or not. See Also -------- ASSET.intersection_matrix """ return _quantities_almost_equal(self.x_edges[0], self.y_edges[0]) def intersection_matrix(self, normalization=None): """ Generates the intersection matrix from a list of spike trains. Given a list of `neo.SpikeTrain`, consider two binned versions of them differing for the starting and ending times of the binning: `t_start_x`, `t_stop_x`, `t_start_y` and `t_stop_y` respectively (the time intervals can be either identical or completely disjoint). Then calculate the intersection matrix `M` of the two binned data, where `M[i,j]` is the overlap of bin `i` in the first binned data and bin `j` in the second binned data (i.e., the number of spike trains spiking at both bin `i` and bin `j`). The matrix entries can be normalized to values between `0` and `1` via different normalizations (see "Parameters" section). Parameters ---------- normalization : {'intersection', 'mean', 'union'} or None, optional The normalization type to be applied to each entry `M[i,j]` of the intersection matrix `M`. 
Given the sets `s_i` and `s_j` of neuron IDs in the bins `i` and `j` respectively, the normalization coefficient can be: * None: no normalisation (row counts) * 'intersection': `len(intersection(s_i, s_j))` * 'mean': `sqrt(len(s_1) * len(s_2))` * 'union': `len(union(s_i, s_j))` Default: None. Returns ------- imat : (n,n) np.ndarray The floating point intersection matrix of a list of spike trains. It has the shape `(n, n)`, where `n` is the number of bins that time was discretized in. """ imat = _intersection_matrix(self.spiketrains_i, self.spiketrains_j, self.bin_size, self.t_start_i, self.t_start_j, self.t_stop_i, self.t_stop_j, normalization=normalization) return imat def probability_matrix_montecarlo(self, n_surrogates, imat=None, surrogate_method='dither_spikes', surrogate_dt=None): """ Given a list of parallel spike trains, estimate the cumulative probability of each entry in their intersection matrix by a Monte Carlo approach using surrogate data. Contrarily to the analytical version (see :func:`ASSET.probability_matrix_analytical`) the Monte Carlo one does not incorporate the assumptions of Poissonianity in the null hypothesis. The method produces surrogate spike trains (using one of several methods at disposal, see "Parameters" section) and calculates their intersection matrix `M`. For each entry `(i, j)`, the intersection CDF `P[i, j]` is then given by: .. centered:: P[i, j] = #(spike_train_surrogates such that M[i, j] < I[i, j]) / #(spike_train_surrogates) If `P[i, j]` is large (close to 1), `I[i, j]` is statistically significant: the probability to observe an overlap equal to or larger than `I[i, j]` under the null hypothesis is `1 - P[i, j]`, very small. Parameters ---------- n_surrogates : int The number of spike train surrogates to generate for the bootstrap procedure. imat : (n,n) np.ndarray or None, optional The floating point intersection matrix of a list of spike trains. 
It has the shape `(n, n)`, where `n` is the number of bins that time was discretized in. If None, the output of :func:`ASSET.intersection_matrix` is used. Default: None surrogate_method : {'dither_spike_train', 'dither_spikes', 'jitter_spikes', 'randomise_spikes', 'shuffle_isis', 'joint_isi_dithering'}, optional The method to generate surrogate spike trains. Refer to the :func:`spike_train_surrogates.surrogates` documentation for more information about each surrogate method. Note that some of these methods need `surrogate_dt` parameter, others ignore it. Default: 'dither_spike_train'. surrogate_dt : pq.Quantity, optional For surrogate methods shifting spike times randomly around their original time ('dither_spike_train', 'dither_spikes') or replacing them randomly within a certain window ('jitter_spikes'), `surrogate_dt` represents the size of that shift (window). For other methods, `surrogate_dt` is ignored. If None, it's set to `self.bin_size * 5`. Default: None. Returns ------- pmat : np.ndarray The cumulative probability matrix. `pmat[i, j]` represents the estimated probability of having an overlap between bins `i` and `j` STRICTLY LOWER than the observed overlap, under the null hypothesis of independence of the input spike trains. Notes ----- We recommend playing with `surrogate_dt` parameter to see how it influences the result matrix. For this, refer to the ASSET tutorial. 
See Also -------- ASSET.probability_matrix_analytical : analytical derivation of the matrix """ if imat is None: # Compute the intersection matrix of the original data imat = self.intersection_matrix() if surrogate_dt is None: surrogate_dt = self.bin_size * 5 symmetric = self.is_symmetric() # Generate surrogate spike trains as a list surrs # Compute the p-value matrix pmat; pmat[i, j] counts the fraction of # surrogate data whose intersection value at (i, j) is lower than or # equal to that of the original data pmat = np.zeros(imat.shape, dtype=np.int32) for surr_id in trange(n_surrogates, desc="pmat_bootstrap", disable=not self.verbose): if mpi_accelerated and surr_id % size != rank: continue surrogates = [spike_train_surrogates.surrogates( st, n_surrogates=1, method=surrogate_method, dt=surrogate_dt, decimals=None, edges=True)[0] for st in self.spiketrains_i] if symmetric: surrogates_y = surrogates else: surrogates_y = [spike_train_surrogates.surrogates( st, n_surrogates=1, method=surrogate_method, dt=surrogate_dt, decimals=None, edges=True)[0] for st in self.spiketrains_j] imat_surr = _intersection_matrix(surrogates, surrogates_y, self.bin_size, self.t_start_i, self.t_start_j, self.t_stop_i, self.t_stop_j) pmat += (imat_surr <= (imat - 1)) del imat_surr if mpi_accelerated: pmat = comm.allreduce(pmat, op=MPI.SUM) pmat = pmat * 1. / n_surrogates if symmetric: np.fill_diagonal(pmat, 0.5) return pmat def probability_matrix_analytical(self, imat=None, firing_rates_x='estimate', firing_rates_y='estimate', kernel_width=100 * pq.ms): r""" Given a list of spike trains, approximates the cumulative probability of each entry in their intersection matrix. The approximation is analytical and works under the assumptions that the input spike trains are independent and Poisson. 
It works as follows: * Bin each spike train at the specified `bin_size`: this yields a binary array of 1s (spike in bin) and 0s (no spike in bin; clipping used); * If required, estimate the rate profile of each spike train by convolving the binned array with a boxcar kernel of user-defined length; * For each neuron `k` and each pair of bins `i` and `j`, compute the probability :math:`p_ijk` that neuron `k` fired in both bins `i` and `j`. * Approximate the probability distribution of the intersection value at `(i, j)` by a Poisson distribution with mean parameter :math:`l = \sum_k (p_ijk)`, justified by Le Cam's approximation of a sum of independent Bernouilli random variables with a Poisson distribution. Parameters ---------- imat : (n,n) np.ndarray or None, optional The intersection matrix of a list of spike trains. It has the shape `(n, n)`, where `n` is the number of bins that time was discretized in. If None, the output of :func:`ASSET.intersection_matrix` is used. Default: None firing_rates_x, firing_rates_y : list of neo.AnalogSignal or 'estimate' If a list, `firing_rates[i]` is the firing rate of the spike train `spiketrains[i]`. If 'estimate', firing rates are estimated by simple boxcar kernel convolution, with the specified `kernel_width`. Default: 'estimate'. kernel_width : pq.Quantity, optional The total width of the kernel used to estimate the rate profiles when `firing_rates` is 'estimate'. Default: 100 * pq.ms. Returns ------- pmat : np.ndarray The cumulative probability matrix. `pmat[i, j]` represents the estimated probability of having an overlap between bins `i` and `j` STRICTLY LOWER than the observed overlap, under the null hypothesis of independence of the input spike trains. 
""" if imat is None: # Compute the intersection matrix of the original data imat = self.intersection_matrix() symmetric = self.is_symmetric() bsts_x_matrix = self.spiketrains_binned_i.to_bool_array() if symmetric: bsts_y_matrix = bsts_x_matrix else: bsts_y_matrix = self.spiketrains_binned_j.to_bool_array() # Check that the nr. neurons is identical between the two axes if bsts_x_matrix.shape[0] != bsts_y_matrix.shape[0]: raise ValueError( 'Different number of neurons along the x and y axis!') # Define the firing rate profiles if firing_rates_x == 'estimate': # If rates are to be estimated, create the rate profiles as # Quantity objects obtained by boxcar-kernel convolution fir_rate_x = self._rate_of_binned_spiketrain(bsts_x_matrix, kernel_width) elif isinstance(firing_rates_x, list): # If rates provided as lists of AnalogSignals, create time slices # for both axes, interpolate in the time bins of interest and # convert to Quantity fir_rate_x = _interpolate_signals( firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1], self.verbose) else: raise ValueError( 'fir_rates_x must be a list or the string "estimate"') if symmetric: fir_rate_y = fir_rate_x elif firing_rates_y == 'estimate': fir_rate_y = self._rate_of_binned_spiketrain(bsts_y_matrix, kernel_width) elif isinstance(firing_rates_y, list): # If rates provided as lists of AnalogSignals, create time slices # for both axes, interpolate in the time bins of interest and # convert to Quantity fir_rate_y = _interpolate_signals( firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1], self.verbose) else: raise ValueError( 'fir_rates_y must be a list or the string "estimate"') # For each neuron, compute the prob. that that neuron spikes in any bin if self.verbose: print('compute the prob. that each neuron fires in each pair of ' 'bins...') spike_probs_x = [1. 
- np.exp(-(rate * self.bin_size).rescale( pq.dimensionless).magnitude) for rate in fir_rate_x] if symmetric: spike_probs_y = spike_probs_x else: spike_probs_y = [1. - np.exp(-(rate * self.bin_size).rescale( pq.dimensionless).magnitude) for rate in fir_rate_y] # For each neuron k compute the matrix of probabilities p_ijk that # neuron k spikes in both bins i and j. (For i = j it's just spike # probs[k][i]) spike_prob_mats = [np.outer(probx, proby) for (probx, proby) in zip(spike_probs_x, spike_probs_y)] # Compute the matrix Mu[i, j] of parameters for the Poisson # distributions which describe, at each (i, j), the approximated # overlap probability. This matrix is just the sum of the probability # matrices computed above if self.verbose: print( "compute the probability matrix by Le Cam's approximation...") Mu = np.sum(spike_prob_mats, axis=0) # Compute the probability matrix obtained from imat using the Poisson # pdfs pmat = scipy.stats.poisson.cdf(imat - 1, Mu) if symmetric: # Substitute 0.5 to the elements along the main diagonal if self.verbose: print("substitute 0.5 to elements along the main diagonal...") np.fill_diagonal(pmat, 0.5) return pmat def joint_probability_matrix(self, pmat, filter_shape, n_largest, min_p_value=1e-5): """ Map a probability matrix `pmat` to a joint probability matrix `jmat`, where `jmat[i, j]` is the joint p-value of the largest neighbors of `pmat[i, j]`. The values of `pmat` are assumed to be uniformly distributed in the range [0, 1]. Centered a rectangular kernel of shape `filter_shape=(l, w)` around each entry `pmat[i, j]`, aligned along the diagonal where `pmat[i, j]` lies into, extracts the `n_largest` values falling within the kernel and computes their joint p-value `jmat[i, j]`. Parameters ---------- pmat : np.ndarray A square matrix, the output of :func:`ASSET.probability_matrix_montecarlo` or :func:`ASSET.probability_matrix_analytical`, of cumulative probability values between 0 and 1. 
The values are assumed to be uniformly distributed in the said range. filter_shape : tuple of int A pair of integers representing the kernel shape `(l, w)`. n_largest : int The number of the largest neighbors to collect for each entry in `jmat`. min_p_value : float, optional The minimum p-value in range `[0, 1)` for individual entries in `pmat`. Each `pmat[i, j]` is set to `min(pmat[i, j], 1-p_value_min)` to avoid that a single highly significant value in `pmat` (extreme case: `pmat[i, j] = 1`) yields joint significance of itself and its neighbors. Default: 1e-5. Returns ------- jmat : np.ndarray The joint probability matrix associated to `pmat`. """ l, w = filter_shape # Find for each P_ij in the probability matrix its neighbors and # maximize them by the maximum value 1-p_value_min pmat_neighb = _pmat_neighbors( pmat, filter_shape=filter_shape, n_largest=n_largest) pmat_neighb = np.minimum(pmat_neighb, 1. - min_p_value) # in order to avoid doing the same calculation multiple times: # find all unique sets of values in pmat_neighb # and store the corresponding indices # flatten the second and third dimension in order to use np.unique pmat_neighb = pmat_neighb.reshape(n_largest, pmat.size).T pmat_neighb, pmat_neighb_indices = np.unique(pmat_neighb, axis=0, return_inverse=True) # Compute the joint p-value matrix jpvmat n = l * (1 + 2 * w) - w * ( w + 1) # number of entries covered by kernel jpvmat = _jsf_uniform_orderstat_3d(pmat_neighb, n, verbose=self.verbose) # restore the original shape using the stored indices jpvmat = jpvmat[pmat_neighb_indices].reshape(pmat.shape) return 1. - jpvmat @staticmethod def mask_matrices(matrices, thresholds): """ Given a list of `matrices` and a list of `thresholds`, return a boolean matrix `B` ("mask") such that `B[i,j]` is True if each input matrix in the list strictly exceeds the corresponding threshold at that position. If multiple matrices are passed along with only one threshold the same threshold is applied to all matrices. 
Parameters ---------- matrices : list of np.ndarray The matrices which are compared to the respective thresholds to build the mask. All matrices must have the same shape. Typically, it is a list `[pmat, jmat]`, i.e., the (cumulative) probability and joint probability matrices. thresholds : float or list of float The significance thresholds for each matrix in `matrices`. Returns ------- mask : np.ndarray Boolean mask matrix with the shape of the input matrices. Raises ------ ValueError If `matrices` or `thresholds` is an empty list. If `matrices` and `thresholds` have different lengths. See Also -------- ASSET.probability_matrix_montecarlo : for `pmat` generation ASSET.probability_matrix_analytical : for `pmat` generation ASSET.joint_probability_matrix : for `jmat` generation """ if len(matrices) == 0: raise ValueError("Empty list of matrices") if isinstance(thresholds, float): thresholds = np.full(shape=len(matrices), fill_value=thresholds) if len(matrices) != len(thresholds): raise ValueError( '`matrices` and `thresholds` must have same length') mask = np.ones_like(matrices[0], dtype=bool) for (mat, thresh) in zip(matrices, thresholds): mask &= mat > thresh # Replace nans, coming from False * np.inf, with zeros mask[np.isnan(mask)] = False return mask @staticmethod def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, stretch): r""" Given a matrix `mask_matrix`, replaces its positive elements with integers representing different cluster IDs. Each cluster comprises close-by elements. In ASSET analysis, `mask_matrix` is a thresholded ("masked") version of the intersection matrix `imat`, whose values are those of `imat` only if considered statistically significant, and zero otherwise. A cluster is built by pooling elements according to their distance, via the DBSCAN algorithm (see `sklearn.cluster.DBSCAN` class). 
Elements form a neighbourhood if at least one of them has a distance not larger than `max_distance` from the others, and if they are at least `min_neighbors`. Overlapping neighborhoods form a cluster: * Clusters are assigned integers from `1` to the total number `k` of clusters; * Unclustered ("isolated") positive elements of `mask_matrix` are assigned value `-1`; * Non-positive elements are assigned the value `0`. The distance between the positions of two positive elements in `mask_matrix` is given by a Euclidean metric which is stretched if the two positions are not aligned along the 45 degree direction (the main diagonal direction), as more, with maximal stretching along the anti-diagonal. Specifically, the Euclidean distance between positions `(i1, j1)` and `(i2, j2)` is stretched by a factor .. math:: 1 + (\mathtt{stretch} - 1.) * \left|\sin((\pi / 4) - \theta)\right|, where :math:`\theta` is the angle between the pixels and the 45 degree direction. The stretching factor thus varies between 1 and `stretch`. Parameters ---------- mask_matrix : np.ndarray The boolean matrix, whose elements with positive values are to be clustered. The output of :func:`ASSET.mask_matrices`. max_distance : float The maximum distance between two elements in `mask_matrix` to be a part of the same neighbourhood in the DBSCAN algorithm. min_neighbors : int The minimum number of elements to form a neighbourhood. stretch : float The stretching factor of the euclidean metric for elements aligned along the 135 degree direction (anti-diagonal). The actual stretching increases from 1 to `stretch` as the direction of the two elements moves from the 45 to the 135 degree direction. `stretch` must be greater than 1. 
Returns ------- cluster_mat : np.ndarray A matrix with the same shape of `mask_matrix`, each of whose elements is either: * a positive integer (cluster ID) if the element is part of a cluster; * `0` if the corresponding element in `mask_matrix` is non-positive; * `-1` if the element does not belong to any cluster. See Also -------- sklearn.cluster.DBSCAN """ # Don't do anything if mat is identically zero if np.all(mask_matrix == 0): return mask_matrix # List the significant pixels of mat in a 2-columns array xpos_sgnf, ypos_sgnf = np.where(mask_matrix > 0) # Compute the matrix D[i, j] of euclidean distances between pixels i # and j D = _stretched_metric_2d( xpos_sgnf, ypos_sgnf, stretch=stretch, ref_angle=45) # Cluster positions of significant pixels via dbscan core_samples, config = dbscan( D, eps=max_distance, min_samples=min_neighbors, metric='precomputed') # Construct the clustered matrix, where each element has value # * i = 1 to k if it belongs to a cluster i, # * 0 if it is not significant, # * -1 if it is significant but does not belong to any cluster cluster_mat = np.zeros_like(mask_matrix, dtype=np.int32) cluster_mat[xpos_sgnf, ypos_sgnf] = \ config * (config == -1) + (config + 1) * (config >= 0) return cluster_mat def extract_synchronous_events(self, cmat, ids=None): """ Given a list of spike trains, a bin size, and a clustered intersection matrix obtained from those spike trains via ASSET analysis, extracts the sequences of synchronous events (SSEs) corresponding to clustered elements in the cluster matrix. Parameters ---------- cmat: (n,n) np.ndarray The cluster matrix, the output of :func:`ASSET.cluster_matrix_entries`. ids : list, optional A list of spike train IDs. If provided, `ids[i]` is the identity of `spiketrains[i]`. If None, the IDs `0,1,...,n-1` are used. Default: None. 
Returns ------- sse_dict : dict A dictionary `D` of SSEs, where each SSE is a sub-dictionary `Dk`, `k=1,...,K`, where `K` is the max positive integer in `cmat` (i.e., the total number of clusters in `cmat`): .. centered:: D = {1: D1, 2: D2, ..., K: DK} Each sub-dictionary `Dk` represents the k-th diagonal structure (i.e., the k-th cluster) in `cmat`, and is of the form .. centered:: Dk = {(i1, j1): S1, (i2, j2): S2, ..., (iL, jL): SL}. The keys `(i, j)` represent the positions (time bin IDs) of all elements in `cmat` that compose the SSE (i.e., that take value `l` and therefore belong to the same cluster), and the values `Sk` are sets of neuron IDs representing a repeated synchronous event (i.e., spiking at time bins `i` and `j`). """ nr_worms = cmat.max() # number of different clusters ("worms") in cmat if nr_worms <= 0: return {} # Compute the transactions associated to the two binnings tracts_x = _transactions( self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i, t_stop=self.t_stop_i, ids=ids) if self.spiketrains_j is self.spiketrains_i: diag_id = 0 tracts_y = tracts_x else: if self.is_symmetric(): diag_id = 0 tracts_y = tracts_x else: diag_id = None tracts_y = _transactions( self.spiketrains_j, bin_size=self.bin_size, t_start=self.t_start_j, t_stop=self.t_stop_j, ids=ids) # Reconstruct each worm, link by link sse_dict = {} for k in range(1, nr_worms + 1): # for each worm # worm k is a list of links (each link will be 1 sublist) worm_k = {} pos_worm_k = np.array( np.where(cmat == k)).T # position of all links # if no link lies on the reference diagonal if all([y - x != diag_id for (x, y) in pos_worm_k]): for bin_x, bin_y in pos_worm_k: # for each link # reconstruct the link link_l = set(tracts_x[bin_x]).intersection( tracts_y[bin_y]) # and assign it to its pixel worm_k[(bin_x, bin_y)] = link_l sse_dict[k] = worm_k return sse_dict def _rate_of_binned_spiketrain(self, binned_spiketrains, kernel_width): """ Calculate the rate of binned spiketrains 
using convolution with a boxcar kernel. """ if self.verbose: print('compute rates by boxcar-kernel convolution...') # Create the boxcar kernel and convolve it with the binned spike trains k = int((kernel_width / self.bin_size).simplified.item()) kernel = np.full(k, fill_value=1. / k) rate = np.vstack([np.convolve(bst, kernel, mode='same') for bst in binned_spiketrains]) # The convolution results in an array decreasing at the borders due # to absence of spikes beyond the borders. Replace the first and last # (k//2) elements with the (k//2)-th / (n-k//2)-th ones, respectively k2 = k // 2 for i in range(rate.shape[0]): rate[i, :k2] = rate[i, k2] rate[i, -k2:] = rate[i, -k2 - 1] # Multiply the firing rates by the proper unit rate = rate * (1. / self.bin_size).rescale('Hz') return rate
JuliaSprenger/elephant
elephant/asset.py
Python
bsd-3-clause
72,742
[ "NEURON" ]
07f47335d2ef7ed6549fd79a03d9f60e2d737267373e4f3c1becb451468e7676
#!/usr/bin/env python import unittest import mlbgame from datetime import datetime class TestGame(unittest.TestCase): def test_day(self): games = mlbgame.day(2016, 8, 2) for game in games: if game.home_team == 'Mets': g = game self.assertIsInstance(game.away_team, str) self.assertIsInstance(game.away_team_errors, int) self.assertIsInstance(game.away_team_hits, int) self.assertIsInstance(game.away_team_runs, int) self.assertIsInstance(game.date, datetime) self.assertIsInstance(game.game_id, str) self.assertIsInstance(game.game_league, str) self.assertIsInstance(game.game_start_time, str) self.assertIsInstance(game.game_status, str) self.assertIsInstance(game.game_tag, str) self.assertIsInstance(game.home_team, str) self.assertIsInstance(game.home_team_errors, int) self.assertIsInstance(game.home_team_hits, int) self.assertIsInstance(game.home_team_runs, int) self.assertIsInstance(game.l_pitcher, str) self.assertIsInstance(game.l_pitcher_losses, int) self.assertIsInstance(game.l_pitcher_wins, int) self.assertIsInstance(game.l_team, str) self.assertIsInstance(game.sv_pitcher, str) self.assertIsInstance(game.sv_pitcher_saves, int) self.assertIsInstance(game.w_pitcher, str) self.assertIsInstance(game.w_pitcher_losses, int) self.assertIsInstance(game.w_pitcher_wins, int) self.assertIsInstance(game.w_team, str) self.assertIsInstance(game.nice_score(), str) game = g self.assertEqual(game.away_team, 'Yankees') self.assertEqual(game.away_team_errors, 2) self.assertEqual(game.away_team_hits, 6) self.assertEqual(game.away_team_runs, 1) self.assertEqual(game.date, datetime(2016, 8, 2, 19, 10)) self.assertEqual(game.game_id, '2016_08_02_nyamlb_nynmlb_1') self.assertEqual(game.game_league, 'AN') self.assertEqual(game.game_start_time, '07:10 pm') self.assertEqual(game.game_status, 'FINAL') self.assertEqual(game.game_tag, 'go_game') self.assertEqual(game.home_team, 'Mets') self.assertEqual(game.home_team_errors, 0) self.assertEqual(game.home_team_hits, 10) 
self.assertEqual(game.home_team_runs, 7) self.assertEqual(game.l_pitcher, 'Masahiro Tanaka') self.assertEqual(game.l_pitcher_losses, 4) self.assertEqual(game.l_pitcher_wins, 7) self.assertEqual(game.l_team, 'Yankees') self.assertEqual(game.sv_pitcher, '. ') self.assertEqual(game.sv_pitcher_saves, 0) self.assertEqual(game.w_pitcher, 'Jacob deGrom') self.assertEqual(game.w_pitcher_losses, 5) self.assertEqual(game.w_pitcher_wins, 7) self.assertEqual(game.w_team, 'Mets') self.assertEqual(game.__str__(), 'Yankees (1) at Mets (7)') def test_day_empty(self): games = mlbgame.day(1000, 1, 1) self.assertEqual(games, []) def test_games(self): games = mlbgame.games(2016, 7) self.assertIsInstance(games, list) for day in games: self.assertIsInstance(day, list) for game in day: self.assertIsInstance(game, mlbgame.game.GameScoreboard) games = mlbgame.combine_games(games) for game in games: self.assertIsInstance(game.away_team, str) self.assertIsInstance(game.away_team_errors, int) self.assertIsInstance(game.away_team_hits, int) self.assertIsInstance(game.away_team_runs, int) self.assertIsInstance(game.date, datetime) self.assertIsInstance(game.game_id, str) self.assertIsInstance(game.game_league, str) self.assertIsInstance(game.game_start_time, str) self.assertIsInstance(game.game_status, str) self.assertIsInstance(game.game_tag, str) self.assertIsInstance(game.home_team, str) self.assertIsInstance(game.home_team_errors, int) self.assertIsInstance(game.home_team_hits, int) self.assertIsInstance(game.home_team_runs, int) self.assertIsInstance(game.nice_score(), str) if game.game_tag == 'go_game': # skip canceled games, which don't have W/L attributes if game.home_team_runs == game.away_team_runs == 0: continue self.assertIsInstance(game.l_pitcher, str) self.assertIsInstance(game.l_pitcher_losses, int) self.assertIsInstance(game.l_pitcher_wins, int) self.assertIsInstance(game.l_team, str) self.assertIsInstance(game.sv_pitcher, str) self.assertIsInstance(game.sv_pitcher_saves, int) 
self.assertIsInstance(game.w_pitcher, str) self.assertIsInstance(game.w_pitcher_losses, int) self.assertIsInstance(game.w_pitcher_wins, int) self.assertIsInstance(game.w_team, str) def test_box_score(self): box_score = mlbgame.box_score('2016_08_02_nyamlb_nynmlb_1') self.assertEqual(box_score.game_id, '2016_08_02_nyamlb_nynmlb_1') self.assertIsInstance(box_score.innings, list) for inning in box_score: self.assertIn('inning', inning) self.assertIn('away', inning) self.assertIn('home', inning) self.assertEqual(box_score.innings[0]['inning'], 1) self.assertEqual(box_score.innings[0]['away'], 0) self.assertEqual(box_score.innings[0]['home'], 0) self.assertEqual(box_score.innings[1]['inning'], 2) self.assertEqual(box_score.innings[1]['away'], 0) self.assertEqual(box_score.innings[1]['home'], 0) self.assertEqual(box_score.innings[2]['inning'], 3) self.assertEqual(box_score.innings[2]['away'], 0) self.assertEqual(box_score.innings[2]['home'], 2) self.assertEqual(box_score.innings[3]['inning'], 4) self.assertEqual(box_score.innings[3]['away'], 0) self.assertEqual(box_score.innings[3]['home'], 0) self.assertEqual(box_score.innings[4]['inning'], 5) self.assertEqual(box_score.innings[4]['away'], 0) self.assertEqual(box_score.innings[4]['home'], 1) self.assertEqual(box_score.innings[5]['inning'], 6) self.assertEqual(box_score.innings[5]['away'], 0) self.assertEqual(box_score.innings[5]['home'], 0) self.assertEqual(box_score.innings[6]['inning'], 7) self.assertEqual(box_score.innings[6]['away'], 0) self.assertEqual(box_score.innings[6]['home'], 4) self.assertEqual(box_score.innings[7]['inning'], 8) self.assertEqual(box_score.innings[7]['away'], 0) self.assertEqual(box_score.innings[7]['home'], 0) self.assertEqual(box_score.innings[8]['inning'], 9) self.assertEqual(box_score.innings[8]['away'], 1) self.assertEqual(box_score.innings[8]['home'], 'x') self.assertEqual(box_score.print_scoreboard(), ( 'Inning\t1 2 3 4 5 6 7 8 9 \n' '---------------------------\n' 'Away\t0 0 0 0 0 0 0 
0 1 \n' 'Home\t0 0 2 0 1 0 4 0 x ' )) def test_box_score_empty(self): self.assertRaises(ValueError, lambda: mlbgame.box_score('game_id')) self.assertRaises(ValueError, lambda: mlbgame.box_score('2016_08_02_nymlb_nymlb_1')) def test_overview(self): overview = mlbgame.overview('2016_08_02_nyamlb_nynmlb_1') self.assertEqual(overview.ampm, 'PM') self.assertEqual(overview.attendance, '42,819') self.assertEqual(overview.aw_lg_ampm, 'PM') self.assertEqual(overview.away_ampm, 'PM') self.assertEqual(overview.away_code, 'nya') self.assertEqual(overview.away_division, 'E') self.assertEqual(overview.away_file_code, 'nyy') self.assertEqual(overview.away_games_back, 9.0) self.assertEqual(overview.away_games_back_wildcard, 5.0) self.assertEqual(overview.away_league_id, 103) self.assertEqual(overview.away_loss, 53) self.assertEqual(overview.away_name_abbrev, 'NYY') self.assertEqual(overview.away_preview_link, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=preview&c_id=mlb') self.assertIsInstance(overview.away_probable_pitcher_era, str) self.assertIsInstance(overview.away_probable_pitcher_first, str) self.assertIsInstance(overview.away_probable_pitcher_first_name, str) self.assertIsInstance(overview.away_probable_pitcher_id, str) self.assertIsInstance(overview.away_probable_pitcher_last, str) self.assertIsInstance(overview.away_probable_pitcher_last_name, str) self.assertIsInstance(overview.away_probable_pitcher_losses, str) self.assertIsInstance(overview.away_probable_pitcher_name_display_roster, str) self.assertIsInstance(overview.away_probable_pitcher_number, str) self.assertIsInstance(overview.away_probable_pitcher_s_era, str) self.assertIsInstance(overview.away_probable_pitcher_s_losses, str) self.assertIsInstance(overview.away_probable_pitcher_s_wins, str) self.assertIsInstance(overview.away_probable_pitcher_stats_season, str) self.assertIsInstance(overview.away_probable_pitcher_stats_type, str) self.assertIsInstance(overview.away_probable_pitcher_throwinghand, 
str) self.assertIsInstance(overview.away_probable_pitcher_wins, str) self.assertEqual(overview.away_recap_link, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=recap&c_id=mlb') self.assertEqual(overview.away_sport_code, 'mlb') self.assertEqual(overview.away_team_city, 'NY Yankees') self.assertEqual(overview.away_team_errors, 2) self.assertEqual(overview.away_team_hits, 6) self.assertEqual(overview.away_team_id, 147) self.assertEqual(overview.away_team_name, 'Yankees') self.assertEqual(overview.away_team_runs, 1) self.assertEqual(overview.away_time, '7:10') self.assertEqual(overview.away_time_zone, 'ET') self.assertEqual(overview.away_win, 53) self.assertEqual(overview.balls, 0) self.assertEqual(overview.date, 'August 2, 2016') self.assertEqual(overview.day, 'TUE') self.assertEqual(overview.double_header_sw, 'N') self.assertEqual(overview.elapsed_time, '2:39') self.assertEqual(overview.first_pitch_et, '') self.assertEqual(overview.game_data_directory, '/components/game/mlb/year_2016/month_08/day_02/gid_2016_08_02_nyamlb_nynmlb_1') self.assertEqual(overview.game_id, '2016/08/02/nyamlb-nynmlb-1') self.assertEqual(overview.game_nbr, 1) self.assertEqual(overview.game_pk, 448453) self.assertEqual(overview.game_type, 'R') self.assertEqual(overview.gameday_link, '2016_08_02_nyamlb_nynmlb_1') self.assertEqual(overview.gameday_sw, 'P') self.assertEqual(overview.hm_lg_ampm, 'PM') self.assertEqual(overview.home_ampm, 'PM') self.assertEqual(overview.home_code, 'nyn') self.assertEqual(overview.home_division, 'E') self.assertEqual(overview.home_file_code, 'nym') self.assertEqual(overview.home_games_back, 8.0) self.assertEqual(overview.home_games_back_wildcard, '-') self.assertEqual(overview.home_league_id, 104) self.assertEqual(overview.home_loss, 51) self.assertEqual(overview.home_name_abbrev, 'NYM') self.assertEqual(overview.home_preview_link, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=preview&c_id=mlb') 
self.assertIsInstance(overview.home_probable_pitcher_era, str) self.assertIsInstance(overview.home_probable_pitcher_first, str) self.assertIsInstance(overview.home_probable_pitcher_first_name, str) self.assertIsInstance(overview.home_probable_pitcher_id, str) self.assertIsInstance(overview.home_probable_pitcher_last, str) self.assertIsInstance(overview.home_probable_pitcher_last_name, str) self.assertIsInstance(overview.home_probable_pitcher_losses, str) self.assertIsInstance(overview.home_probable_pitcher_name_display_roster, str) self.assertIsInstance(overview.home_probable_pitcher_number, str) self.assertIsInstance(overview.home_probable_pitcher_s_era, str) self.assertIsInstance(overview.home_probable_pitcher_s_losses, str) self.assertIsInstance(overview.home_probable_pitcher_s_wins, str) self.assertIsInstance(overview.home_probable_pitcher_stats_season, str) self.assertIsInstance(overview.home_probable_pitcher_stats_type, str) self.assertIsInstance(overview.home_probable_pitcher_throwinghand, str) self.assertIsInstance(overview.home_probable_pitcher_wins, str) self.assertEqual(overview.home_recap_link, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=recap&c_id=mlb') self.assertEqual(overview.home_sport_code, 'mlb') self.assertEqual(overview.home_team_city, 'NY Mets') self.assertEqual(overview.home_team_errors, 0) self.assertEqual(overview.home_team_hits, 10) self.assertEqual(overview.home_team_id, 121) self.assertEqual(overview.home_team_name, 'Mets') self.assertEqual(overview.home_team_runs, 7) self.assertEqual(overview.home_time, '7:10') self.assertEqual(overview.home_time_zone, 'ET') self.assertEqual(overview.home_win, 55) self.assertEqual(overview.id, '2016/08/02/nyamlb-nynmlb-1') self.assertEqual(overview.ind, 'F') self.assertEqual(overview.inning, 9) self.assertEqual(overview.inning_state, '') self.assertEqual(overview.is_no_hitter, 'N') self.assertEqual(overview.is_perfect_game, 'N') self.assertEqual(overview.league, 'AN') 
self.assertEqual(overview.location, 'Flushing, NY') self.assertEqual(overview.note, '') self.assertEqual(overview.official_scorer, 'Jordan Sprechman') self.assertEqual(overview.original_date, '2016/08/02') self.assertEqual(overview.outs, 3) self.assertEqual(overview.photos_link, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=photos') self.assertEqual(overview.preview, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=preview&c_id=mlb') self.assertEqual(overview.scheduled_innings, 9) self.assertEqual(overview.start_time, '7:11 PM') self.assertEqual(overview.status, 'Final') self.assertEqual(overview.status_ind, 'F') self.assertEqual(overview.strikes, 0) self.assertEqual(overview.tbd_flag, 'N') self.assertEqual(overview.tiebreaker_sw, 'N') self.assertEqual(overview.time, '7:10') self.assertEqual(overview.time_aw_lg, '7:10') self.assertEqual(overview.time_date, '2016/08/02 7:10') self.assertEqual(overview.time_date_aw_lg, '2016/08/02 7:10') self.assertEqual(overview.time_date_hm_lg, '2016/08/02 7:10') self.assertEqual(overview.time_hm_lg, '7:10') self.assertEqual(overview.time_zone, 'ET') self.assertEqual(overview.time_zone_aw_lg, -4) self.assertEqual(overview.time_zone_hm_lg, -4) self.assertEqual(overview.top_inning, 'Y') self.assertEqual(overview.tv_station, 'WPIX') self.assertEqual(overview.tz_aw_lg_gen, 'ET') self.assertEqual(overview.tz_hm_lg_gen, 'ET') self.assertEqual(overview.venue, 'Citi Field') self.assertEqual(overview.venue_id, 3289) self.assertEqual(overview.venue_name, 'Citi Field') self.assertEqual(overview.venue_w_chan_loc, 'USNY0504') self.assertEqual(overview.weather, '75 degrees, partly cloudy') self.assertEqual(overview.wind, '8 mph, R to L') self.assertEqual(overview.wrapup_link, '/mlb/gameday/index.jsp?gid=2016_08_02_nyamlb_nynmlb_1&mode=wrap&c_id=mlb') def test_overview_empty(self): self.assertRaises(ValueError, lambda: mlbgame.overview('game_id')) self.assertRaises(ValueError, lambda: 
mlbgame.overview('2016_08_02_nymlb_nymlb_1')) def test_players(self): players = mlbgame.players('2016_08_02_nyamlb_nynmlb_1') coaches = players.home_coaches + players.away_coaches umpires = players.umpires players = players.home_players + players.away_players for coach in coaches: self.assertIsInstance(coach.first, str) self.assertIsInstance(coach.id, int) self.assertIsInstance(coach.last, str) self.assertIsInstance(coach.num, (int, str)) self.assertIsInstance(coach.position, str) for player in players: self.assertIsInstance(player.avg, float) self.assertIsInstance(player.bats, str) self.assertIsInstance(player.boxname, str) self.assertIsInstance(player.first, str) self.assertIsInstance(player.hr, int) self.assertIsInstance(player.id, int) self.assertIsInstance(player.last, str) self.assertIsInstance(player.num, (int, str)) self.assertIsInstance(player.parent_team_abbrev, str) self.assertIsInstance(player.parent_team_id, int) self.assertIsInstance(player.position, str) self.assertIsInstance(player.rbi, int) self.assertIsInstance(player.rl, str) self.assertIsInstance(player.status, str) self.assertIsInstance(player.team_abbrev, str) self.assertIsInstance(player.team_id, int) for ump in umpires: self.assertIsInstance(ump.first, str) self.assertIsInstance(ump.id, int) self.assertIsInstance(ump.last, str) self.assertIsInstance(ump.name, str) self.assertIsInstance(ump.position, str) coach = coaches[0] player = players[0] ump = umpires[0] self.assertEqual(coach.first, 'Terry') self.assertEqual(coach.id, 492632) self.assertEqual(coach.last, 'Collins') self.assertEqual(coach.num, 10) self.assertEqual(coach.position, 'manager') self.assertEqual(player.avg, 0.079) self.assertEqual(player.bats, 'R') self.assertEqual(player.boxname, 'Colon') self.assertEqual(player.era, 3.58) self.assertEqual(player.first, 'Bartolo') self.assertEqual(player.hr, 1) self.assertEqual(player.id, 112526) self.assertEqual(player.last, 'Colon') self.assertEqual(player.losses, 6) 
self.assertEqual(player.num, 40) self.assertEqual(player.parent_team_abbrev, 'NYM') self.assertEqual(player.parent_team_id, 121) self.assertEqual(player.position, 'P') self.assertEqual(player.rbi, 2) self.assertEqual(player.rl, 'R') self.assertEqual(player.status, 'A') self.assertEqual(player.team_abbrev, 'NYM') self.assertEqual(player.team_id, 121) self.assertEqual(player.wins, 9) self.assertEqual(ump.first, 'Brian') self.assertEqual(ump.id, 427192) self.assertEqual(ump.last, 'Gorman') self.assertEqual(ump.name, 'Brian Gorman') self.assertEqual(ump.position, 'home') def test_players_empty(self): self.assertRaises(ValueError, lambda: mlbgame.players('game_id')) self.assertRaises(ValueError, lambda: mlbgame.players('2016_08_02_nymlb_nymlb_1')) def test_value_to_int(self): attrs = [{'away': ''}, {'not_here': 0}] for attr in attrs: self.assertEqual(0, mlbgame.game.value_to_int(attr, 'away')) attrs = [{'home': 3}, {'home': 'X'}] for attr in attrs: self.assertEqual(attr.get('home'), mlbgame.game.value_to_int(attr, 'home'))
panzarino/mlbgame
tests/test_game.py
Python
mit
20,130
[ "Brian" ]
a103e9843cf7ec88d4ac97abda116fc2a17a8ad7f44f667580baa70efa730a7f
# encoding: utf-8 """ Export (not only) geometry to various formats. """ from __future__ import print_function from builtins import zip from builtins import range from builtins import object from yade.wrapper import * from yade import utils,Matrix3,Vector3 #textExt=============================================================== def textExt(filename, format='x_y_z_r', comment='',mask=-1,attrs=[]): """Save sphere coordinates and other parameters into a text file in specific format. Non-spherical bodies are silently skipped. Users can add here their own specific format, giving meaningful names. The first file row will contain the format name. Be sure to add the same format specification in ymport.textExt. :param string filename: the name of the file, where sphere coordinates will be exported. :param string format: the name of output format. Supported 'x_y_z_r'(default), 'x_y_z_r_matId', 'x_y_z_r_attrs' (use proper comment) :param string comment: the text, which will be added as a comment at the top of file. If you want to create several lines of text, please use '\\\\n#' for next lines. With 'x_y_z_r_attrs' format, the last (or only) line should consist of column headers of quantities passed as attrs (1 comment word for scalars, 3 comment words for vectors and 9 comment words for matrices) :param int mask: export only spheres with the corresponding mask export only spheres with the corresponding mask :param [str] attrs: attributes to be exported with 'x_y_z_r_attrs' format. Each str in the list is evaluated for every body exported with body=b (i.e. 'b.state.pos.norm()' would stand for distance of body from coordinate system origin) :return: number of spheres which were written. :rtype: int """ O=Omega() try: out=open(filename,'w') except: raise RuntimeError("Problem to write into the file") count=0 # TODO use output=[] instrad of ''??? 
output = '' outputVel='' if (format!='liggghts_in'): output = '#format ' + format + '\n' if (comment): if format=='x_y_z_r_attrs': cmts = comment.split('\n') for cmt in cmts[:-1]: output += cmt output += '# x y z r ' + cmts[-1] + '\n' else: output += '# ' + comment + '\n' minCoord= Vector3.Zero maxCoord= Vector3.Zero maskNumber = [] for b in O.bodies: try: if (isinstance(b.shape,Sphere) and ((mask<0) or ((mask&b.mask)>0))): if (format=='x_y_z_r'): output+=('%g\t%g\t%g\t%g\n'%(b.state.pos[0],b.state.pos[1],b.state.pos[2],b.shape.radius)) elif (format=='x_y_z_r_matId'): output+=('%g\t%g\t%g\t%g\t%d\n'%(b.state.pos[0],b.state.pos[1],b.state.pos[2],b.shape.radius,b.material.id)) elif (format=='x_y_z_r_attrs'): output+=('%g\t%g\t%g\t%g'%(b.state.pos[0],b.state.pos[1],b.state.pos[2],b.shape.radius)) for cmd in attrs: v = eval(cmd) if isinstance(v,(int,float)): output+='\t%g'%v elif isinstance(v,Vector3): output+='\t%g\t%g\t%g'%tuple(v[i] for i in range(3)) elif isinstance(v,Matrix3): output+='\t%g'%tuple(v[i] for i in range(9)) output += '\n' elif (format=='id_x_y_z_r_matId'): output+=('%d\t%g\t%g\t%g\t%g\t%d\n'%(b.id,b.state.pos[0],b.state.pos[1],b.state.pos[2],b.shape.radius,b.material.id)) elif (format=='jointedPM'): output+=('%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\n'%(b.id,b.state.onJoint,b.state.joint,b.state.jointNormal1[0],b.state.jointNormal1[1],b.state.jointNormal1[2],b.state.jointNormal2[0],b.state.jointNormal2[1],b.state.jointNormal2[2],b.state.jointNormal3[0],b.state.jointNormal3[1],b.state.jointNormal3[2])) elif (format=='liggghts_in'): output+=('%g %g %g %g %g %g %g\n'%(count+1,b.mask,b.shape.radius,b.material.density,b.state.pos[0],b.state.pos[1],b.state.pos[2])) outputVel+=('%g %g %g %g %g %g %g\n'%(count+1,b.state.vel[0],b.state.vel[1],b.state.vel[2],b.state.angVel[0],b.state.angVel[1],b.state.angVel[2])) else: raise RuntimeError("Please, specify a correct format output!"); count+=1 if (count==1): minCoord = b.state.pos - 
Vector3(b.shape.radius,b.shape.radius,b.shape.radius) maxCoord = b.state.pos + Vector3(b.shape.radius,b.shape.radius,b.shape.radius) else: minCoord = Vector3(min(minCoord[0], b.state.pos[0]-b.shape.radius),min(minCoord[1], b.state.pos[1]-b.shape.radius),min(minCoord[2], b.state.pos[2]-b.shape.radius)) maxCoord = Vector3(max(maxCoord[0], b.state.pos[0]+b.shape.radius),max(maxCoord[1], b.state.pos[1]+b.shape.radius),max(minCoord[2], b.state.pos[2]+b.shape.radius)) if b.mask not in maskNumber: maskNumber.append(b.mask) except AttributeError: pass if (format=='liggghts_in'): outputHeader = 'LIGGGHTS Description\n\n' outputHeader += '%d atoms\n%d atom types\n\n'%(count,len(maskNumber)) outputHeader += '%g %g xlo xhi\n%g %g ylo yhi\n%g %g zlo zhi\n\n'%(minCoord[0],maxCoord[0],minCoord[1],maxCoord[1],minCoord[2],maxCoord[2]) output=outputHeader + 'Atoms\n\n' + output + '\nVelocities\n\n' + outputVel out.write(output) out.close() return count #textClumps=============================================================== def textClumps(filename, format='x_y_z_r_clumpId', comment='',mask=-1): """Save clumps-members into a text file. Non-clumps members are bodies are silently skipped. :param string filename: the name of the file, where sphere coordinates will be exported. :param string comment: the text, which will be added as a comment at the top of file. If you want to create several lines of text, please use '\\\\n#' for next lines. :param int mask: export only spheres with the corresponding mask export only spheres with the corresponding mask :return: number of clumps, number of spheres which were written. 
:rtype: int """ O=Omega() try: out=open(filename,'w') except: raise RuntimeError("Problem to write into the file") count=0 countClumps=0 output = '' output = '#format x_y_z_r_clumpId\n' if (comment): output += '# ' + comment + '\n' minCoord= Vector3.Zero maxCoord= Vector3.Zero maskNumber = [] for bC in O.bodies: if bC.isClump: keys = list(bC.shape.members.keys()) countClumps+=1 for ii in keys: try: b = O.bodies[ii] if (isinstance(b.shape,Sphere) and ((mask<0) or ((mask&b.mask)>0))): output+=('%g\t%g\t%g\t%g\t%g\n'%(b.state.pos[0],b.state.pos[1],b.state.pos[2],b.shape.radius,bC.id)) count+=1 except AttributeError: pass out.write(output) out.close() return countClumps,count #textPolyhedra=============================================================== def textPolyhedra(fileName, comment='',mask=-1, explanationComment=True,attrs=[]): """Save polyhedra into a text file. Non-polyhedra bodies are silently skipped. :param string filename: the name of the output file :param string comment: the text, which will be added as a comment at the top of file. If you want to create several lines of text, please use '\\\\n#' for next lines. :param int mask: export only polyhedra with the corresponding mask :param str explanationComment: inclde explanation of format to the beginning of file :return: number of polyhedra which were written. 
:rtype: int """ count = 0 f = open(fileName,'w') f.writelines('# %s\n'%l for l in [ 'YADE export of polyhedra.', 'Each polyhedron export contains first line with id, nuber of vertices and number of surfaces.', 'x,y,z coordinates of each vertex follows (each vertex on separate line).', 'ids if vertices of individual surfaces follows (numbering from 0, each surface on separate line).', '', 'Example of tetrahedron and cube with random ids:', '23 4 4', '0.1 0.2 0.3','1.3 0.1 -0.1','-0.2 1.2 0','0 -0.1 1.5', '0 2 1','0 3 2','0 1 3','1 2 3', '65 8 6', '4 0 0','5 0 0','4 1 0','5 1 0','4 0 1','5 0 1','4 1 1','5 1 1', '0 2 3 1','0 1 5 4','1 3 7 5','3 2 6 7','2 0 4 6','4 5 7 6', '', ]) if comment: f.write('#\n# %s\n'%comment) for b in O.bodies: if not isinstance(b.shape,Polyhedra) or not mask & b.mask: continue count += 1 vertices = [b.state.pos + b.state.ori*v for v in b.shape.v] surfaces = b.shape.GetSurfaces() strAttrs = '' if attrs: for cmd in attrs: v = eval(cmd) if isinstance(v,(int,float)): strAttrs+=' %g'%v elif isinstance(v,Vector3): strAttrs+=' %g %g %g'%tuple(v[i] for i in range(3)) elif isinstance(v,Matrix3): strAttrs+=' %g'%tuple(v[i] for i in range(9)) f.write('%d %d %d%s\n'%(b.id,len(vertices),len(surfaces),strAttrs)) f.writelines('%.8e %.8e %.8e\n'%(v[0],v[1],v[2]) for v in vertices) f.writelines(' '.join(str(i) for i in surface)+'\n' for surface in surfaces) f.close() return count #VTKWriter=============================================================== class VTKWriter(object): """ USAGE: create object vtk_writer = VTKWriter('base_file_name'), add to engines PyRunner with command='vtk_writer.snapshot()' """ def __init__(self,baseName='snapshot',startSnap=0): self.snapCount = startSnap self.baseName=baseName def snapshot(self): import xml.dom.minidom #import xml.dom.ext # python 2.5 and later positions=[]; radii=[] for b in Omega().bodies: if b.mold.name=='Sphere': positions.append(b.phys['se3'][0]) radii.append(b.mold['radius']) # Document and root element 
doc = xml.dom.minidom.Document() root_element = doc.createElementNS("VTK", "VTKFile") root_element.setAttribute("type", "UnstructuredGrid") root_element.setAttribute("version", "0.1") root_element.setAttribute("byte_order", "LittleEndian") doc.appendChild(root_element) # Unstructured grid element unstructuredGrid = doc.createElementNS("VTK", "UnstructuredGrid") root_element.appendChild(unstructuredGrid) # Piece 0 (only one) piece = doc.createElementNS("VTK", "Piece") piece.setAttribute("NumberOfPoints", str(len(positions))) piece.setAttribute("NumberOfCells", "0") unstructuredGrid.appendChild(piece) ### Points #### points = doc.createElementNS("VTK", "Points") piece.appendChild(points) # Point location data point_coords = doc.createElementNS("VTK", "DataArray") point_coords.setAttribute("type", "Float32") point_coords.setAttribute("format", "ascii") point_coords.setAttribute("NumberOfComponents", "3") points.appendChild(point_coords) string = str() for x,y,z in positions: string += repr(x) + ' ' + repr(y) + ' ' + repr(z) + ' ' point_coords_data = doc.createTextNode(string) point_coords.appendChild(point_coords_data) #### Cells #### cells = doc.createElementNS("VTK", "Cells") piece.appendChild(cells) # Cell locations cell_connectivity = doc.createElementNS("VTK", "DataArray") cell_connectivity.setAttribute("type", "Int32") cell_connectivity.setAttribute("Name", "connectivity") cell_connectivity.setAttribute("format", "ascii") cells.appendChild(cell_connectivity) # Cell location data connectivity = doc.createTextNode("0") cell_connectivity.appendChild(connectivity) cell_offsets = doc.createElementNS("VTK", "DataArray") cell_offsets.setAttribute("type", "Int32") cell_offsets.setAttribute("Name", "offsets") cell_offsets.setAttribute("format", "ascii") cells.appendChild(cell_offsets) offsets = doc.createTextNode("0") cell_offsets.appendChild(offsets) cell_types = doc.createElementNS("VTK", "DataArray") cell_types.setAttribute("type", "UInt8") 
cell_types.setAttribute("Name", "types") cell_types.setAttribute("format", "ascii") cells.appendChild(cell_types) types = doc.createTextNode("1") cell_types.appendChild(types) #### Data at Points #### point_data = doc.createElementNS("VTK", "PointData") piece.appendChild(point_data) # Particle radii if len(radii) > 0: radiiNode = doc.createElementNS("VTK", "DataArray") radiiNode.setAttribute("Name", "radii") radiiNode.setAttribute("type", "Float32") radiiNode.setAttribute("format", "ascii") point_data.appendChild(radiiNode) string = str() for r in radii: string += repr(r) + ' ' radiiData = doc.createTextNode(string) radiiNode.appendChild(radiiData) #### Cell data (dummy) #### cell_data = doc.createElementNS("VTK", "CellData") piece.appendChild(cell_data) # Write to file and exit outFile = open(self.baseName+'%08d'%self.snapCount+'.vtu', 'w') # xml.dom.ext.PrettyPrint(doc, file) doc.writexml(outFile, newl='\n') outFile.close() self.snapCount+=1 #text=============================================================== def text(filename,mask=-1): """Save sphere coordinates into a text file; the format of the line is: x y z r. Non-spherical bodies are silently skipped. Example added to examples/regular-sphere-pack/regular-sphere-pack.py :param string filename: the name of the file, where sphere coordinates will be exported. :param int mask: export only spheres with the corresponding mask :return: number of spheres which were written. :rtype: int """ return (textExt(filename=filename, format='x_y_z_r',mask=mask)) #VTKExporter=============================================================== class VTKExporter(object): """Class for exporting data to `VTK Simple Legacy File <https://www.vtk.org/VTK/img/file-formats.pdf>`_ (for example if, for some reason, you are not able to use :yref:`VTKRecorder`). 
Supported export of: * spheres * facets * polyhedra * interactions * contact points * periodic cell Usage: * create object ``vtkExporter = VTKExporter('baseFileName')``, * add to ``O.engines`` a ``PyRunner`` with ``command='vtkExporter.exportSomething(...)'`` * alternatively, just use ``vtkExporter.exportSomething(...)`` at the end of the script for instance Example: :ysrc:`examples/test/vtk-exporter/vtkExporter.py`, :ysrc:`examples/test/unv-read/unvReadVTKExport.py`. :param string baseName: name of the exported files. The files would be named, e.g., ``baseName-spheres-snapNb.vtk`` or ``baseName-facets-snapNb.vtk`` :param int startSnap: the numbering of files will start form ``startSnap`` """ # TODO comments def __init__(self,baseName,startSnap=0): self.spheresSnapCount = startSnap self.facetsSnapCount = startSnap self.intrsSnapCount = startSnap self.polyhedraSnapCount = startSnap self.contactPointsSnapCount = startSnap self.baseName = baseName # auxiliary functions def _checkWhatArgumentIsDict(self,what,funName,whatName="what"): """An auxiliary function, to be deleted when the 'deprecation period' is over""" if isinstance(what,(tuple,list)): raise DeprecationWarning("{}: '{}' argument is no longer list/tuple, but dict".format(funName,whatName)) assert isinstance(what,dict) def _warn(self,msg): print("Warning (yade.export.VTKExporter): " + msg) def _error(self,msg): print("ERROR (yade.export.VTKExporter): " + msg) def _getBodies(self,ids,type): allIds = False if isinstance(ids,str) and ids.lower()=='all': ids=range(len(O.bodies)) allIds = True bodies = [] for i in ids: b = O.bodies[i] if not b: continue if not isinstance(b.shape,type): if not allIds: self._warn("body %d is not of type %s"%(i,type)) continue bodies.append(b) if not bodies: self._warn("no bodies...") return bodies def _getInteractions(self,ids): if isinstance(ids,str) and ids.lower()=='all': ids = [(i.id1,i.id2) for i in O.interactions] intrs = [(i,j) for i,j in ids] if not intrs: self._warn("no 
interactions ...") return intrs def exportSpheres(self,ids='all',what={},comment="comment",numLabel=None,useRef=False): """exports spheres (positions and radius) and defined properties. :param [int]|"all" ids: if "all", then export all spheres, otherwise only spheres from integer list :param dictionary what: which additional quantities (other than the position and the radius) to export. parameter is name->command dictionary. Name is string under which it is save to vtk, command is string to evaluate. Note that the bodies are labeled as b in this function. Scalar, vector and tensor variables are supported. For example, to export velocity (with name particleVelocity) and the distance form point (0,0,0) (named as dist) you should write: ``what=dict(particleVelocity='b.state.vel',dist='b.state.pos.norm()', ... )`` :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. time step), if unspecified, the last used value + 1 will be used :param bool useRef: if False (default), use current position of the spheres for export, use reference position otherwise """ # get list of bodies to export bodies = self._getBodies(ids,Sphere) if not bodies: return nBodies = len(bodies) # output file fName = self.baseName+'-spheres-%08d'%(numLabel if numLabel else self.spheresSnapCount)+'.vtk' outFile = open(fName, 'w') # head outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n"%(comment,nBodies)) # write position of spheres for b in bodies: pos = b.state.refPos if useRef else b.state.pos if not O.periodic else O.cell.wrap(b.state.pos) outFile.write("%g %g %g\n"%(pos[0],pos[1],pos[2])) # write radius outFile.write("\nPOINT_DATA %d\nSCALARS radius double 1\nLOOKUP_TABLE default\n"%(nBodies)) for b in bodies: outFile.write("%g\n"%(b.shape.radius)) # checks what argument self._checkWhatArgumentIsDict(what,"exportSpheres") # write additional data from 'what' param for name,command in what.items(): # for each name... 
test = eval(command) # ... eval one example to see what type (float, Vector3, Matrix3) the result is ... # ... and write appropriate header line and loop over all bodies and write appropriate vtk line(s) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for b in bodies: t = eval(command) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) elif isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for b in bodies: v = eval(command) outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) elif isinstance(test,(int,float)): outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(name)) for b in bodies: outFile.write("%g\n"%(eval(command))) else: self._warn("exportSpheres: wrong 'what' parameter, vtk output might be corrupted'") outFile.close() self.spheresSnapCount += 1 def exportFacets(self,ids='all',what={},comment="comment",numLabel=None): """ exports facets (positions) and defined properties. Facets are exported with multiplicated nodes :param [int]|"all" ids: if "all", then export all facets, otherwise only facets from integer list :param dictionary what: see :meth:`exportSpheres` :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. 
time step), if unspecified, the last used value + 1 will be used """ # get list of bodies to export bodies = self._getBodies(ids,Facet) if not bodies: return nBodies = len(bodies) # output file fName = self.baseName+'-facets-%08d'%(numLabel if numLabel else self.facetsSnapCount)+'.vtk' outFile = open(fName, 'w') # head outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n"%(comment,3*nBodies)) # write vertices for b in bodies: p = b.state.pos o = b.state.ori s = b.shape pt1 = p + o*s.vertices[0] pt2 = p + o*s.vertices[1] pt3 = p + o*s.vertices[2] outFile.write("%g %g %g\n"%(pt1[0],pt1[1],pt1[2])) outFile.write("%g %g %g\n"%(pt2[0],pt2[1],pt2[2])) outFile.write("%g %g %g\n"%(pt3[0],pt3[1],pt3[2])) # write facets outFile.write("\nPOLYGONS %d %d\n"%(nBodies,4*nBodies)) i = 0 for b in bodies: outFile.write("3 %d %d %d\n"%(i,i+1,i+2)) i += 3 # checks what argument self._checkWhatArgumentIsDict(what,"exportFacets") # write additional data from 'what' param if what: outFile.write("\nCELL_DATA %d"%(nBodies)) # see exportSpheres for explanation of this code block for name,command in what.items(): test = eval(command) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for b in bodies: t = eval(command) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) if isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for b in bodies: v = eval(command) outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) else: outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(name)) for b in bodies: outFile.write("%g\n"%(eval(command))) outFile.close() self.facetsSnapCount += 1 def exportFacetsAsMesh(self,ids='all',connectivityTable=None,what={},comment="comment",numLabel=None): """ exports facets (positions) and defined properties. Facets are exported as mesh (not with multiplicated nodes). 
Therefore additional parameters connectivityTable is needed :param [int]|"all" ids: if "all", then export all facets, otherwise only facets from integer list :param dictionary what: see :meth:`exportSpheres` :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. time step), if unspecified, the last used value + 1 will be used :param [(float,float,float)|Vector3] nodes: list of coordinates of nodes :param [(int,int,int)] connectivityTable: list of node ids of individual elements (facets) """ # get list of bodies to export bodies = self._getBodies(ids,Facet) ids = [b.id for b in bodies] if not bodies: return nBodies = len(bodies) if connectivityTable is None: self._error("'connectivityTable' not specified") return if nBodies != len(connectivityTable): self._error("length of 'connectivityTable' does not match length of 'ids', no export") return # nodes nodes = [Vector3.Zero for i in range(max(max(e) for e in connectivityTable)+1)] for id,e in zip(ids,connectivityTable): b = bodies[id] p = b.state.pos o = b.state.ori s = b.shape pt1 = p + o*s.vertices[0] pt2 = p + o*s.vertices[1] pt3 = p + o*s.vertices[2] nodes[e[0]] = pt1 nodes[e[1]] = pt2 nodes[e[2]] = pt3 # output file fName = self.baseName+'-facets-%08d'%(numLabel if numLabel else self.facetsSnapCount)+'.vtk' outFile = open(fName, 'w') # head outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n"%(comment,len(nodes))) # write vertices for node in nodes: outFile.write("%g %g %g\n"%(node[0],node[1],node[2])) # write facets outFile.write("\nPOLYGONS %d %d\n"%(len(connectivityTable),4*len(connectivityTable))) for e in connectivityTable: outFile.write("3 %d %d %d\n"%e) # checks what argument self._checkWhatArgumentIsDict(what,"exportFacetsAsMesh") # write additional data from 'what' param if what: outFile.write("\nCELL_DATA %d"%(nBodies)) # see exportSpheres for explanation of this code block for name,command in what.items(): test = 
eval(command) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for b in bodies: t = eval(command) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) if isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for b in bodies: v = eval(command) outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) else: outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(name)) for b in bodies: outFile.write("%g\n"%(eval(command))) outFile.close() self.facetsSnapCount += 1 def exportInteractions(self,ids='all',what={},verticesWhat={},comment="comment",numLabel=None,useRef=False): """exports interactions and defined properties. :param [(int,int)]|"all" ids: if "all", then export all interactions, otherwise only interactions from (int,int) list :param dictionary what: what to export. parameter is a name->command dictionary. Name is string under which it is saved to vtk, command is string to evaluate. Note that the interactions are labeled as i in this function. Scalar, vector and tensor variables are supported. For example, to export the stiffness difference (named as ``dStiff``) from a certain value (1e9) you should write: ``what=dict(dStiff='i.phys.kn-1e9', ... )`` :param dictionary verticesWhat: what to export on connected bodies. Bodies are labeled as ``b`` (or ``b1`` and ``b2`` if you need to treat both bodies differently) :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. 
time step), if unspecified, the last used value + 1 will be used :param bool useRef: if False (default), use current position of the bodies for export, use reference position otherwise """ # get list of interactions to export intrs = self._getInteractions(ids) if not intrs: return nIntrs = len(intrs) # output file fName = self.baseName+'-intrs-%08d'%(numLabel if numLabel else self.intrsSnapCount)+'.vtk' outFile = open(fName, 'w') # head outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n"%(comment,2*nIntrs)) # write coords of intrs bodies (also taking into account possible periodicity for ii,jj in intrs: i = O.interactions[ii,jj] pos = O.bodies[ii].state.refPos if useRef else O.bodies[ii].state.pos outFile.write("%g %g %g\n"%(pos[0],pos[1],pos[2])) pos = (O.bodies[jj].state.refPos if useRef else O.bodies[jj].state.pos) + (O.cell.hSize*i.cellDist if O.periodic else Vector3.Zero) outFile.write("%g %g %g\n"%(pos[0],pos[1],pos[2])) # write interactions as lines outFile.write("LINES %d %d\n"%(nIntrs,3*nIntrs)) for j,i in enumerate(intrs): outFile.write("2 %d %d\n"%(2*j,2*j+1)) # checks what argument self._checkWhatArgumentIsDict(what,"exportInteractions") # write additional data from 'what' param if what: outFile.write("\nCELL_DATA %d\n"%(nIntrs)) for i in O.interactions: if i.isReal: break # see exportSpheres for explanation of this code block for name,command in what.items(): test = eval(command) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for ii,jj in intrs: i = O.interactions[ii,jj] t = eval(command) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) elif isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for ii,jj in intrs: i = O.interactions[ii,jj] v = eval(command) outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) elif isinstance(test,(int,float)): outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE 
default\n"%(name)) for ii,jj in intrs: i = O.interactions[ii,jj] outFile.write("%g\n"%(eval(command))) else: self._warn("exportInteractions: wrong 'what' parameter, vtk output might be corrupted") # checks verticesWhat argument self._checkWhatArgumentIsDict(verticesWhat,"exportInteractions","verticesWhat") # write additional data of bodies if verticesWhat: outFile.write("\nPOINT_DATA %d\n"%(2*nIntrs)) b = b1 = b2 = O.bodies[0] # see exportSpheres for explanation of this code block for name,vWhat in verticesWhat.items(): lw = len(vWhat) if lw == 1: command = vWhat test = eval(command) elif lw == 2: command1,command2 = vWhat test = eval(command1) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for ii,jj in intrs: i = O.interactions[ii,jj] b1 = O.bodies[ii] b2 = O.bodies[jj] if lw==2: for b in (b1,b2): t = eval(command) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) elif lw==3: t1 = eval(command1) t2 = eval(command2) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t1[0,0],t1[0,1],t1[0,2],t1[1,0],t1[1,1],t1[1,2],t1[2,0],t1[2,1],t1[2,2])) outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t2[0,0],t2[0,1],t2[0,2],t2[1,0],t2[1,1],t2[1,2],t2[2,0],t2[2,1],t2[2,2])) elif isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for ii,jj in intrs: i = O.interactions[ii,jj] b1 = O.bodies[ii] b2 = O.bodies[jj] if lw==2: for b in (b1,b2): v = eval(command) outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) elif lw==3: v1 = eval(command1) v2 = eval(command2) outFile.write("%g %g %g\n"%(v1[0],v1[1],v1[2])) outFile.write("%g %g %g\n"%(v2[0],v2[1],v2[2])) elif isinstance(test,(int,float)): outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(name)) for ii,jj in intrs: i = O.interactions[ii,jj] b1 = O.bodies[ii] b2 = O.bodies[jj] if lw==2: for b in (b1,b2): outFile.write("%g\n"%(eval(command))) elif lw==3: outFile.write("%g\n"%(eval(command1))) 
outFile.write("%g\n"%(eval(command2))) else: self._warn("exportInteractions: wrong 'what' parameter, vtk output might be corrupted") outFile.close() self.intrsSnapCount += 1 def exportContactPoints(self,ids='all',what={},useRef={},comment="comment",numLabel=None): """exports contact points (CPs) and defined properties. :param [(int,int)] ids: see :meth:`exportInteractions` :param dictionary what: see :meth:`exportInteractions` :param bool useRef: see :meth:`exportInteractions` :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. time step), if unspecified, the last used value + 1 will be used """ # get list of interactions to export if useRef: useRef = dict(((i.id1,i.id2),v) for i,v in useRef.items()) intrs = list(useRef.keys()) else: intrs = self._getInteractions(ids) if not intrs: return nIntrs = len(intrs) # output file fName = self.baseName+'-cps-%08d'%(numLabel if numLabel else self.contactPointsSnapCount)+'.vtk' outFile = open(fName, 'w') # head outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n"%(comment,nIntrs)) # write coords of contact points for ii,jj in intrs: if useRef: pos = useRef[(ii,jj)] else: i = O.interactions[ii,jj] pos = i.geom.contactPoint outFile.write("%g %g %g\n"%(pos[0],pos[1],pos[2])) # checks what argument self._checkWhatArgumentIsDict(what,"exportContactPoints") # see exportSpheres for explanation of this code block if what: outFile.write("\nPOINT_DATA %d\n"%(nIntrs)) for i in O.interactions: if i.isReal: break for name,command in what.items(): test = eval(command) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for ii,jj in intrs: try: i = O.interactions[ii,jj] t = eval(command) except IndexError: t = Matrix3.Zero # TODO? 
outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) elif isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for ii,jj in intrs: try: i = O.interactions[ii,jj] v = eval(command) except IndexError: v = Vector3.Zero # TODO? outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) elif isinstance(test,(int,float)): outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(name)) for ii,jj in intrs: try: i = O.interactions[ii,jj] f = eval(command) except IndexError: f = 0. # TODO? outFile.write("%g\n"%(f)) else: self._warn("exportContacPoints: wrong 'what' parameter, vtk output might be corrupted'") outFile.close() self.contactPointsSnapCount += 1 def exportPeriodicCell(self,comment="comment",numLabel=None): """exports the :yref:`Cell` geometry for periodic simulations. :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. time step), if unspecified, the last used value + 1 will be used """ if not O.periodic: self._warn("exportPeriodicCell: scene is not periodic, no export...") return hSize = O.cell.hSize fName = self.baseName+'-periCell-%08d'%(numLabel if numLabel else self.intrsSnapCount)+'.vtk' outFile = open(fName, 'w') outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET UNSTRUCTURED_GRID\nPOINTS 8 double\n"%(comment)) vertices = [ hSize*Vector3(0,0,1), hSize*Vector3(0,1,1), hSize*Vector3(1,1,1), hSize*Vector3(1,0,1), hSize*Vector3(0,0,0), hSize*Vector3(0,1,0), hSize*Vector3(1,1,0), hSize*Vector3(1,0,0), ] for v in vertices: outFile.write('%g %g %g\n'%(v[0],v[1],v[2])) outFile.write('\nCELLS 1 9\n') outFile.write('8 0 1 2 3 4 5 6 7\n') outFile.write('\nCELL_TYPES 1\n12\n') outFile.close() def exportPolyhedra(self,ids='all',what={},comment="comment",numLabel=None,useRef=False): """Exports polyhedrons and defined properties. 
:param ids: if "all", then export all polyhedrons, otherwise only polyhedrons from integer list :type ids: [int] | "all" :param dictionary what: which additional quantities (in addition to the positions) to export. parameter is name->command dictionary. Name is string under which it is saved to vtk, command is string to evaluate. Note that the bodies are labeled as b in this function. Scalar, vector and tensor variables are supported. For example, to export velocity (named as particleVelocity) and the distance from point (0,0,0) (named as dist) you should write: ``what=dict(particleVelocity='b.state.vel',dist='b.state.pos.norm()', ... )`` :param string comment: comment to add to vtk file :param int numLabel: number of file (e.g. time step), if unspecified, the last used value + 1 will be used """ # TODO useRef? # get list of bodies to export bodies = self._getBodies(ids,Polyhedra) # TODO if not bodies: return # number of vertices nVertices = sum(len(b.shape.v) for b in bodies) # export polyherda as a set of triangle faces bodyFaces = [] for b in bodies: ff = [] f = b.shape.GetSurfaceTriangulation() for i in range(len(f)//3): ff.append([f[3*i+j] for j in (0,1,2)]) bodyFaces.append(ff) # output file nFaces = sum(len(f) for f in bodyFaces) fName = self.baseName+'-polyhedra-%08d'%(numLabel if numLabel else self.polyhedraSnapCount)+'.vtk' outFile = open(fName, 'w') # head outFile.write("# vtk DataFile Version 3.0.\n%s\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n"%(comment,nVertices)) # write position of vertices if useRef: dspls = [] for b in bodies: bPos = b.state.pos bOri = b.state.ori brPos = b.state.refPos brOri = b.state.refOri for v in b.shape.v: rPos = brPos + brOri*v pos = bPos + bOri*v outFile.write("%g %g %g\n"%(rPos[0],rPos[1],rPos[2])) dspls.append(pos-rPos) else: for b in bodies: bPos = b.state.pos bOri = b.state.ori for v in b.shape.v: pos = bPos + bOri*v outFile.write("%g %g %g\n"%(pos[0],pos[1],pos[2])) # write triangle faces outFile.write("\nPOLYGONS 
%d %d\n"%(nFaces,4*nFaces)) j = 0 for i,b in enumerate(bodies): faces = bodyFaces[i] for face in faces: t = tuple([j+ii for ii in face]) outFile.write("3 %d %d %d\n"%t) j += len(b.shape.v) # checks what argument self._checkWhatArgumentIsDict(what,"exportPolyhedra") # write additional data from 'what' param if useRef: outFile.write("\nPOINT_DATA %d\n"%(len(dspls))) outFile.write("\nVECTORS displacement double\n") for v in dspls: outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) if what: outFile.write("\nCELL_DATA %d"%(nFaces)) # see exportSpheres for explanation of this code block for name,command in what.items(): test = eval(command) if isinstance(test,Matrix3): outFile.write("\nTENSORS %s double\n"%(name)) for i,b in enumerate(bodies): t = eval(command) for f in bodyFaces[i]: outFile.write("%g %g %g\n%g %g %g\n%g %g %g\n\n"%(t[0,0],t[0,1],t[0,2],t[1,0],t[1,1],t[1,2],t[2,0],t[2,1],t[2,2])) elif isinstance(test,Vector3): outFile.write("\nVECTORS %s double\n"%(name)) for i,b in enumerate(bodies): v = eval(command) for f in bodyFaces[i]: outFile.write("%g %g %g\n"%(v[0],v[1],v[2])) elif isinstance(test,(int,float)): outFile.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(name)) for i,b in enumerate(bodies): e = eval(command) for f in bodyFaces[i]: outFile.write("%g\n"%e) else: self._warn("exportPolyhedra: wrong 'what' parameter, vtk output might be corrupted") outFile.close() self.polyhedraSnapCount += 1 #gmshGeoExport=============================================================== def gmshGeo(filename, comment='',mask=-1,accuracy=-1): """Save spheres in geo-file for the following using in GMSH (http://www.geuz.org/gmsh/doc/texinfo/) program. The spheres can be there meshed. :param string filename: the name of the file, where sphere coordinates will be exported. :param int mask: export only spheres with the corresponding mask export only spheres with the corresponding mask :param float accuracy: the accuracy parameter, which will be set for the poinst in geo-file. 
By default: 1./10. of the minimal sphere diameter. :return: number of spheres which were exported. :rtype: int """ O=Omega() try: out=open(filename,'w') except: raise RuntimeError("Problem to write into the file") count=0 #out.write('#format \n') # Find the minimal diameter if (accuracy<0.0): dMin = -1.0 for b in O.bodies: try: if (isinstance(b.shape,Sphere) and ((mask<0) or ((mask&b.mask)>0))): if (((dMin>0.0) and (dMin>b.shape.radius*2.0)) or (dMin<0.0)): dMin = b.shape.radius*2.0 except AttributeError: pass accuracy = dMin/10.0 # Export bodies PTS = 0 CRS = 0 out.write('Acc = %g;\n'%(accuracy)) for b in O.bodies: try: if (isinstance(b.shape,Sphere) and ((mask<0) or ((mask&b.mask)>0))): r = b.shape.radius x = b.state.pos[0] y = b.state.pos[1] z = b.state.pos[2] out.write('Rad = %g;\n'%(r)) out.write('Point(%d) = {%g, %g, %g, Acc};\n\ Point(%d) = {%g, %g, %g, Acc};\n\ Point(%d) = {%g, %g, %g, Acc};\n\ Point(%d) = {%g, %g, %g, Acc};\n\ Point(%d) = {%g, %g, %g, Acc};\n\ Point(%d) = {%g, %g, %g, Acc};\n\ Point(%d) = {%g, %g, %g, Acc};\n\n'%( PTS+1, x, y, z, PTS+2, r+x, y, z, PTS+3, -r+x, y, z, PTS+4, x, y, r+z, PTS+5, x, y, -r+z, PTS+6, x, r+y, z, PTS+7, x, -r+y, z )) out.write('\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n\ Circle(%d) = {%d, %d, %d};\n'%( CRS+1, PTS+4, PTS+1, PTS+6, CRS+2, PTS+6, PTS+1, PTS+5, CRS+3, PTS+6, PTS+1, PTS+3, CRS+4, PTS+3, PTS+1, PTS+7, CRS+5, PTS+7, PTS+1, PTS+5, CRS+6, PTS+7, PTS+1, PTS+2, CRS+7, PTS+2, PTS+1, PTS+6, CRS+8, PTS+7, PTS+1, PTS+4, CRS+9, PTS+2, PTS+1, PTS+5, CRS+10, PTS+5, PTS+1, PTS+3, CRS+11, PTS+3, PTS+1, PTS+4, CRS+12, PTS+4, PTS+1, PTS+2, )) out.write('\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, 
%d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\ Line Loop(%d) = {%d, %d, %d}; Ruled Surface(%d) = {%d};\n\n\ '%( (CRS+13), +(CRS+1), -(CRS+7), -(CRS+12), (CRS+14), (CRS+13), (CRS+15), +(CRS+7), +(CRS+2), -(CRS+9), (CRS+16), (CRS+15), (CRS+17), +(CRS+2), +(CRS+10), -(CRS+3), (CRS+18), (CRS+17), (CRS+19), +(CRS+3), +(CRS+11), +(CRS+1), (CRS+20), (CRS+19), (CRS+21), +(CRS+8), +(CRS+12), -(CRS+6), (CRS+22), (CRS+21), (CRS+23), +(CRS+4), +(CRS+8), -(CRS+11), (CRS+24), (CRS+23), (CRS+25), +(CRS+5), +(CRS+10), (CRS+4), (CRS+26), (CRS+25), (CRS+27), +(CRS+6), +(CRS+9), -(CRS+5), (CRS+28), (CRS+27), )) PTS+=7 CRS+=28 count+=1 except AttributeError: pass out.close() return count # external vtk manipulation =============================================================== def text2vtk(inFileName,outFileName): """Converts text file (created by :yref:`yade.export.textExt` function) into vtk file. 
See :ysrc:`examples/test/paraview-spheres-solid-section/export_text.py` example :param str inFileName: name of input text file :param str outFileName: name of output vtk file """ fin = open(inFileName) fout = open(outFileName,'w') lastLine = None line = '#' while line.startswith('#'): lastLine = line line = fin.readline() columns = lastLine.split()[5:] data = [line.split() for line in fin] fin.close() n = len(data) fout.write('# vtk DataFile Version 3.0.\ncomment\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n'%(n)) fout.writelines('%s %s %s\n'%(d[0],d[1],d[2]) for d in data) fout.write("\nPOINT_DATA %d\nSCALARS radius double 1\nLOOKUP_TABLE default\n"%(n)) fout.writelines('%s\n'%(d[3]) for d in data) for i,c in enumerate(columns): fout.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(c)) fout.writelines('%s\n'%(d[4+i]) for d in data) fout.close() def text2vtkSection(inFileName,outFileName,point,normal=(1,0,0)): """Converts section through spheres from text file (created by :yref:`yade.export.textExt` function) into vtk file. 
See :ysrc:`examples/test/paraview-spheres-solid-section/export_text.py` example :param str inFileName: name of input text file :param str outFileName: name of output vtk file :param Vector3|(float,float,float) point: coordinates of a point lying on the section plane :param Vector3|(float,float,float) normal: normal vector of the section plane """ from math import sqrt norm = sqrt(pow(normal[0],2)+pow(normal[1],2)+pow(normal[2],2)) normal = (normal[0]/norm,normal[1]/norm,normal[2]/norm) # def computeD(point,normal): # from point and normal computes parameter d in plane equation ax+by+cz+d=0 return -normal[0]*point[0] - normal[1]*point[1] - normal[2]*point[2] def computeDistanceFromPlane(dat,point,normal,d=None): # computes distance of sphere dat from plane (point,normal) x,y,z = computeProjectionOnPlane(dat,point,normal,d) cx,cy,cz = dat[0],dat[1],dat[2] return sqrt(pow(x-cx,2)+pow(y-cy,2)+pow(z-cz,2)) def computeProjectionOnPlane(self,point,normal,d=None): # computes projection of sphere dat on plane (point,normal) if d is None: d = computeD(point,normal) nx,ny,nz = normal[0],normal[1],normal[2] cx,cy,cz = dat[0],dat[1],dat[2] t = (-d-nx*cx-ny*cy-nz*cz) / (nx*nx+ny*ny+nz*nz) x,y,z = cx+t*nx, cy+t*ny, cz+t*nz return x,y,z # fin = open(inFileName) lastLine = None line = '#' while line.startswith('#'): lastLine = line line = fin.readline() columns = lastLine.split()[4:] data = [[float(w) for w in line.split()] for line in fin] fin.close() # d = computeD(point,normal) circs = [] for dat in data: r = dat[3] dst = computeDistanceFromPlane(dat,point,normal,d) if dst > r: continue x,y,z = computeProjectionOnPlane(dat,point,normal,d) rNew = sqrt(r*r-dst*dst) dNew = [x,y,z,rNew,r] dNew.extend(dat[4:]) circs.append(dNew) n = len(circs) fout = open(outFileName,'w') fout.write('# vtk DataFile Version 3.0.\ncomment\nASCII\n\nDATASET POLYDATA\nPOINTS %d double\n'%(n)) fout.writelines('%g %g %g\n'%(c[0],c[1],c[2]) for c in circs) fout.write("\nPOINT_DATA %d\nSCALARS radius double 
1\nLOOKUP_TABLE default\n"%(n)) fout.writelines('%g\n'%(c[3]) for c in circs) fout.write("\nSCALARS radiusOrig double 1\nLOOKUP_TABLE default\n") fout.writelines('%g\n'%(c[4]) for c in circs) fout.write("\nVECTORS normal double\n") fout.writelines("%g %g %g\n"%normal for i in circs) for i,c in enumerate(columns): fout.write("\nSCALARS %s double 1\nLOOKUP_TABLE default\n"%(c)) fout.writelines('%s\n'%(c[4+i]) for c in circs) fout.close()
yade/trunk
py/export.py
Python
gpl-2.0
44,375
[ "ParaView", "VTK" ]
a0f388e8ca6fd38200c8fcffaf25114aff506dbd797d7b8f5b14db2531b01ae4
""" Workaround for a bug introduced in 1.5.2 where all units got converted to lower case.... """ from cis.data_io.products import AProduct from cf_units import Unit import logging class flight_track(AProduct): def get_file_signature(self): return [r'.*\.nc'] def create_coords(self, filenames, usr_variable=None): from cis.data_io.netcdf import read_many_files_individually, get_metadata from cis.data_io.ungridded_data import UngriddedCoordinates, UngriddedData from cis.data_io.Coord import Coord, CoordList from cis.exceptions import InvalidVariableError variables = [("lon", "x", 'longitude'), ("lat", "y", 'latitude'), ("alt", "z", 'altitude'), ("time", "t", 'time'), ("p", "p", 'air_pressure')] logging.info("Listing coordinates: " + str(variables)) coords = CoordList() for variable in variables: try: var_data = read_many_files_individually(filenames, variable[0])[variable[0]] meta = get_metadata(var_data[0]) meta.standard_name = variable[2] # Some of the variables have an illegal name attribute... meta.misc.pop('name', None) c = Coord(var_data, meta, axis=variable[1]) if variable[1] == 'z': c.convert_units('m') coords.append(c) except InvalidVariableError: pass # Note - We don't need to convert this time coord as it should have been written in our # 'standard' time unit if usr_variable is None: res = UngriddedCoordinates(coords) else: usr_var_data = read_many_files_individually(filenames, usr_variable)[usr_variable] meta =get_metadata(usr_var_data[0]) # Some of the variables have an illegal name attribute... 
meta.misc.pop('name', None) res = UngriddedData(usr_var_data, meta, coords) return res def create_data_object(self, filenames, variable): return self.create_coords(filenames, variable) def get_file_format(self, filename): return "NetCDF/FlightTrack" def get_file_type_error(self, filename): """ Test that the file is of the correct signature :param filename: the file name for the file :return: list fo errors or None """ from cis.data_io.netcdf import get_netcdf_file_attributes atts = get_netcdf_file_attributes(filename) errors = None try: comment = atts['comment'] except KeyError as ex: errors = ['No comment attribute found in {}'.format(filename)] else: if "Converted by ukca_flight_to_netcdf.py" not in comment: errors = ['Comment ({}) does not match ukca_flight in {}'.format(comment, filename)] return errors
duncanwp/cis_plugins
ukca_flight_track.py
Python
lgpl-3.0
2,942
[ "NetCDF" ]
fd15d8cfb9c557e9feb893120368fb84db00d6e4795cd70dc817769c374a153f
#!/usr/bin/env python # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3 of the License, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, see http://www.gnu.org/licenses/ # # @(#) $Id: $ # # small script to generate static content pages for the microsite # baseurl = 'http://www.geo.uzh.ch/microsite/3dveglab/index.html' template = ''' <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html lang="en" xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> <head> <meta name="generator" content= "HTML Tidy for Linux (vers 25 March 2009), see www.w3.org" /> <meta http-equiv="Content-Type" content="text/html; charset=us-ascii" /> <meta http-equiv="Cache-Control" content= "no-transform,public,max-age=300,s-maxage=900" /> <title>3D Vegetation Lab</title> <base href="%s" /> <style type="text/css" media="screen, print, projection"> /*<![CDATA[*/ @import url(http://fonts.googleapis.com/css?family=Oxygen:400,700,300); html, body { margin:0; padding:0; color: #333; background:#fff; font-family: Arial,sans-serif; font-size: 14px; line-height: 22px; } #body { margin:0 auto; background:white; } #header { margin:0 auto; background:white; border-bottom: 3px solid #48A17E; } #header table { margin: 0 auto; width: 960px; padding: 30px 0;} #header table td.menu { text-align: right;} #header table a { text-decoration: none; font-family: 'Oxygen', sans-serif; font-weight: 700; text-transform: uppercase; font-size: 14px; padding-top: 10px; color: #4D4D4D; 
margin-right: 30px; } #header table a:last-child { margin-right: 0;} #header table a:hover { color: #C27638; border-bottom: 3px solid #C27638; } table.people { width: 55%%; } .logo img { width: 290px; height: auto;} a.logo:hover { border-bottom: 0px !important; } #main { background: url('./images/bg.png') repeat;} #content { margin: 0 auto; width: 960px;} #content-1 { margin:0 auto; float:left; width:204px; padding: 0px 10px; } #content-1 #content-2 { float:right; width:736px; } #content-2-1 { float:left; width:656px; min-height: 460px; padding: 20px 40px 60px; line-height: 25px; background:white; box-shadow: 0 4px 8px 0 rgba(77, 77, 77, 0.38);} #content-2-2 { float:right; width:80px; padding:10px; background:white; } #footer { margin:0 auto; background:white; border-top: 3px solid #48A17E; } #footer-wrapper { width: 960px; margin: 0 auto; } .logos { float: right; } .logos img { width: 140px; height: auto; display: inline-block; } .logos img:first-child { height: 46px; margin-right: 10px; width: auto; } .sidebar-links { margin: 5px 0 30px 0;} .sidebar-links a { color: #4D4D4D; line-height: 1.7em; margin-left: 10px; font-size: 14px; } .sidebar-links a:hover { color: #48A17E;} h3 { font-family: 'Oxygen', sans-serif; font-weight: 700; font-size: 15px; color: #48A17E; margin-bottom: 0px; text-transform: uppercase; } h2 { font-family: 'Oxygen', sans-serif; font-weight: 700; font-size: 25px; line-height: 30px; color: #C27638; margin: 0px 0px 20px 0px;} #content-2-1 h3 { font-size: 18px; margin-bottom: 8px; text-transform: none; } a { color: #4D4D4D; font-size: 14px; } a:hover { color: #48A17E; } .document { margin-bottom: 30px; font-size: 14px; line-height: 17px;} .document a { font-family: 'Oxygen', sans-serif; font-weight: 700; font-size: 16px; display: block; line-height: 22px; margin-bottom: 7px; color: #48A17E; text-decoration: none;} .document a:hover { text-decoration: underline;} .document i { font-size: 11px; display: block; margin-bottom: 5px; } 
.document.abstract i { font-size: 12px; line-height: 19px; } pre { border: 1px solid #F1EFEB; background-color: #F6F4F0; overflow: auto; text-align: left; margin: 0px; padding: 15px;} input[type="email"] { border: 1px solid #dedad8; display: block; font-family: Arial, sans-serif; font-size: 14px; height: 23px; line-height: 20px; outline: 0 none; padding: 7px 14px; width: 200px; text-align: center; } input[type="email"]:focus, input[type="email"]:hover { border: 1px solid #48a17e; } input[type="submit"] { background-color: #48a17e; border: 2px solid #48a17e; color: #fff; display: block; font-family: 'Oxygen',sans-serif; font-size: 13px; font-weight: 700; margin-top: 5px; outline: 0 none; padding: 8px; text-transform: uppercase; width: 230px; } .abstract { text-align: center; } figure img { width: 100%%; height: auto; margin: 30px 0; } /* http://positioniseverything.net/easyclearing.html) */ .cf:after { display:block; clear:both; height:0; visibility:hidden; content:" "; font-size:0; } /* use conditional comments for this bit if you want valid CSS */ .cf {*zoom:1;} /*]]>*/ </style> </style> </head> <body> <div id="body"> <div id="header" class="cf"> <table border="0" width="960"> <tr> <td width="1" rowspan="2"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/" target="_blank" class="logo"><img alt= "3d vegetation lab" height="90" src="http://www.geo.uzh.ch/microsite/3dveglab/software/veglablogo.png" /></a> </td> <td> <div class="logos"> <img src="http://www.esa.int/esalogo/images/downloads/Digital_logo/Hi_resolution/42_digital_logo_dark_blue_HI.png" /> <img src="http://www.uzh.ch/uzh/authoring/images/uzh_logo_e_pos_web_main.jpg" /> </div> </td> </tr> <tr> <td class="menu"> <a href="http://www.geo.uzh.ch/microsite/3dveglab/index.html">Project</a> <a href="consortium">Consortium</a> <a href="people">People</a> <a href="documents">Docs</a> <a href="sites">Sites</a> <a href="eod">EO Data</a> <a href="software">Software</a> </td> </tr> </table> </div> <div id="main" 
class="cf"> <div id="content"> <div id="content-1"> <h3>Sensors</h3> <div class="sidebar-links"> <a href= "https://earth.esa.int/web/guest/missions/3rd-party-missions/historical-missions/landsat-tmetm;jsessionid=20181E62B0A37B8CCD6613286FE6539F.eodisp-prod4040">LANDSAT</a><br /> <a href= "https://earth.esa.int/web/guest/missions/esa-operational-eo-missions/envisat/instruments/meris">MERIS</a><br /> <a href= "https://earth.esa.int/web/guest/missions/3rd-party-missions/current-missions/terraaqua-modis">MODIS</a><br /> <a href= "https://earth.esa.int/web/guest/missions/esa-future-missions/sentinel-2">Sentinel 2</a><br /> <a href= "https://earth.esa.int/web/guest/missions/esa-future-missions/sentinel-3">Sentinel 3</a><br /> </div> <h3>Components</h3> <div class="sidebar-links"> <a href= "http://www.brockmann-consult.de/cms/web/beam/">Beam</a><br /> <a href= "http://www.cesbio.ups-tlse.fr/us/dart/dart_contexte.html">DART</a><br /> <a href= "http://www2.geog.ucl.ac.uk/~plewis/librat/">librat</a><br /> <a href="http://www.libradtran.org">libRadtran</a><br /> </div> </div> <div id="content-2"> <div id="content-2-1"> <!-- CONTENT --> %s <!-- END CONTENT --> </div> </div> </div> </div> <div id="footer" class="cf"> <div id="footer-wrapper"> <p>Copyright &copy;2014</p> </div> </div> </div> </body> </html> ''' pageindex = ''' <h2 class="abstract">3D Vegetation Laboratory - reference site and benchmarking tool</h2> <figure class="abstract"> <img alt="3d Reconstruction" src= "http://www.geo.uzh.ch/microsite/3dveglab/graphics/stand_2.jpg" /> </figure> <div class="document abstract"> <i>Watch our video illustrating the 3d reconstruction! Download it <a href="http://www.geo.uzh.ch/microsite/3dveglab/VirtualForest.mov"> here </a></i> </div> <h3>Abstract</h3> The up-coming generation of ESA operational missions - the Sentinels - will enhance the capability to observe the vegetated surfaces of the Earth. 
Nevertheless the quantitative interpretation of the Earth Observation (EO) signal is a challenging task because vegetation is a complex and dynamic medium. Effects of horizontal and vertical heterogeneities and asymmetrical structures of vegetation as well as their high temporal dynamics are often neglected in the algorithm development, calibration and validation procedures. To better understand the scientific basis as well as the potential of future and upcoming missions we need detailed knowledge about the observed medium and the processes governing the radiative transfer. The combination of a realistic description of the medium in high detail together with a validated radiative transfer model was used to create a virtual lab mimicking reality which is capable to assess the potential of novel observation systems as well as to develop new algorithms and understand scaling issues from point measurements to the landscape. The advancement of ground based LiDAR systems provided information that contributed to describing and reconstructing forest stands in 3D down to the leaf/shoot level. Such detailed representations of the canopy structure and the distribution of leaves/branches within a 3D radiative transfer model now allow for the simulation of current and future missions in a controlled but realistic environment. It would thus offer an opportunity to test and develop dedicated applications to integrate EO into Earth system modeling. The 3D-VegtationLab has developed a concept for land surface reference sites, which was demonstrated for two selected pilot super-sites as a scientific support tool. The tool includes a standardized and comprehensive multi-temporal and multi-scale benchmark dataset together with a scientific toolbox based on a radiative transfer model. 
The 3D-Vegetation Lab provides the scientific community with a common benchmarking tool to develop, validate and compare biophysical EO products from space-borne missions with special attention to prepare for upcoming Sentinels. The 3D-VegetationLab was financed by ESA's STSE funding scheme, and partners were University College of London (UK), TU Wien (AUT), CESBIO Toulouse (FR) and Netcetera (CH). The main project outcomes are described in the final report (PDF) and presentation linked below. The main achievements of the project can be found under the tabs Sites <a href="sites">Sites</a>(3d world files), <a href="eod">EO Data</a> (multi-scale and -temporal EO data) and <a href="software">Software</a> (BEAM plugin encapsuling the RT models librat and DART). <h3>Important Documents</h3> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegLab_FinalReport.pdf" target="_blank"> Final Report (PDF)</a> </div> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegetationLaboratory_ProjectSummary.pdf" target="_blank"> Project Summary (Presentation)</a> </div> ''' pageconsortium = ''' <style type="text/css" media="screen, print, projection"> .menu a[href*="consortium"] { color: #C27638 !important; border-bottom: 3px solid #C27638; !important; } </style> <h2>Consortium</h2> <table border="0"> <tr> <td colspan="2"><h3>Prime Contractor</h3></td> </tr> <tr> <td><a href="http://www.geo.uzh.ch/en/units/rsl">University of Zurich - Remote Sensing Labs</a></td> <td><img alt="RSL" align="left" height="40" src= "http://www.geo.uzh.ch/microsite/sen4sci/img/RSL_logo.jpg" /></td> </tr> <tr> <td>&nbsp;</td> </tr> <tr> <td colspan="2"><h3>Sub-contractors</h3></td> </tr> <tr> <td><a href="http://www.geog.ucl.ac.uk/">University College London</a></td> <td><img alt="UCL" align="left" height="40" src= "http://sharp.cs.ucl.ac.uk/img/ucl_logo_2.jpg" /></td> </tr> <tr> <td><a 
href="http://www.cesbio.ups-tlse.fr/index_us.htm">Centre d'Etudes Spatiales de la BIOsph&egrave;re</a></td> <td><img alt="CESBIO" align="left" height="40" src= "http://www.cesbio.ups-tlse.fr/data_all/images/logo_cesbio.png" /></td> </tr> <tr> <td><a href="http://www.ipf.tuwien.ac.at/">Technische Universit&auml;t Wien</a></td> <td><img alt="TU Wien" align="left" height="40" src= "http://www.tuwien.ac.at/fileadmin/t/tuwien/downloads/cd/CD_NEU_2009/TU_Logos_2009/TU-Signet.png" /></td> </tr> <tr> <td><a href="http://tu-dresden.de/die_tu_dresden/fakultaeten/fakultaet_forst_geo_und_hydrowissenschaften/fachrichtung_geowissenschaften/ipf">Technische Universit&auml;t Dresden</a></td> <td><img alt="TU Dresden" align="left" height="40" src= "logo_blau.png" /></td> </tr> <tr> <td><a href="http://www.netcetera.com">Netcetera AG</a></td> <td><img alt="Netcetera" align="left" height="40" src= "http://netcetera.com/de/dms/images/logos/nca-logo-home.GIF" /></td> </tr> <tr> <td>&nbsp;</td> </tr> <tr> <td colspan="2"><h3>Sponsor</h3></td> </tr> <tr> <td><a href="http://www.esa.int/Our_Activities/Observing_the_Earth">European Space Agency - Earth Observation</a></td> <td><img alt="ESA" align="left" height="38" src= "http://www.esa.int/esalogo/images/downloads/Digital_logo/Hi_resolution/42_digital_logo_dark_blue_HI.png" /></td> </tr> </table> ''' pagepeople = ''' <style type="text/css" media="screen, print, projection"> .menu a[href*="people"] { color: #C27638 !important; border-bottom: 3px solid #C27638; !important; } </style> <h2>People</h2> <table class="people"> <tr> <td colspan="2"><h3>Team</h3></td> </tr> <tr> <td>Felix Morsdorf</td> <td>RSL</td> </tr> <tr> <td>Reik Leiterer</td> <td>RSL</td> </tr> <tr> <td>Fabian Schneider</td> <td>RSL</td> </tr> <tr> <td>Michael Schaepman</td> <td>RSL</td> </tr> <tr> <td colspan="2">&nbsp;</td> </tr> <tr> <td>Mathias Disney</td> <td>UCL</td> </tr> <tr> <td>Philip Lewis</td> <td>UCL</td> </tr> <tr> <td colspan="2">&nbsp;</td> </tr> <tr> 
<td>Jean-Philippe Gastellu-Etchegorry</td> <td>CESBIO</td> </tr> <tr> <td>Nicolas Lauret</td> <td>CESBIO</td> </tr> <tr> <td>Tristan Gregoire</td> <td>CESBIO</td> </tr> <tr> <td colspan="2">&nbsp;</td> </tr> <tr> <td>Norbert Pfeifer</td> <td>TU Wien</td> </tr> <tr> <td>Markus Hollaus</td> <td>TU Wien</td> </tr> <tr> <td colspan="2">&nbsp;</td> </tr> <tr> <td>Jason Brazile</td> <td>Netcetera Zurich</td> </tr> <tr> <td>Cyrill Schenkel</td> <td>Netcetera Zurich</td> </tr> <tr> <td colspan="2">&nbsp;</td> </tr> <tr> <td colspan="2"><h3>Consultants</h3></td> </tr> <tr> <td>Jan Clevers</td> <td>WUR</td> </tr> <tr> <td>Hans-Gerd Maas</td> <td>TU Dresden</td> </tr> <tr> <td>Anne Bienert</td> <td>TU Dresden</td> </tr> <tr> <td>Jean-Luc Widlowski</td> <td>IES</td> </tr> <tr> <td colspan="2">&nbsp;</td> </tr> <tr> <td colspan="2"><h3>ESA Technical Officer</h3></td> </tr> <tr> <td>Benjamin Koetz</td> <td>ESA-ERSIN</td> </tr> </table> ''' pagedocuments = ''' <style type="text/css" media="screen, print, projection"> .menu a[href*="documents"] { color: #C27638 !important; border-bottom: 3px solid #C27638; !important; } </style> <h2>Publications</h2> <div class="document"> <a href="http://www.sciencedirect.com/science/article/pii/S0034425714002284" target="_blank">Simulating imaging spectrometer data: 3D forest modeling based on LiDAR and in situ data</a> <i>Remote Sensing of Environment, Volume 152, September 2014, Pages 235-250</i> Fabian D. Schneider, Reik Leiterer, Felix Morsdorf, Jean-Philippe Gastellu-Etchegorry, Nicolas Lauret, Norbert Pfeifer, Michael E. 
Schaepman </div> <div class="document"> <a href="http://www.mdpi.com/2072-4292/5/11/5424" target="_blank">A Practical Approach for Extracting Tree Models in Forest Environments Based on Equirectangular Projections of Terrestrial Laser Scans</a> <i>Remote Sensing 2013, 5(11), 5424-5448; doi:10.3390/rs5115424</i> Lothar Eysn, Norbert Pfeifer, Camillo Ressl, Markus Hollaus, Andreas Grafl and Felix Morsdorf </div> <div class="document"> <a href= "https://www.schweizerbart.de/papers/pfg/detail/2013/79992/Operational_forest_structure_monitoring_using_airborne_laser_scanning" target="_blank"> Operational forest structure monitoring using airborne laser scanning </a> <i>Photogrammetrie, Fernerkundung, Geoinformation, 2013, 3, 173-184</i> Reik Leiterer, Werner Muecke, Felix Morsdorf, Markus Hollaus, Norbert Pfeifer & Michael E. Schaepman </div> <div class="document"> <a href="http://www.sciencedirect.com/science/article/pii/S0034425713002319" target="_blank">Radiative transfer modeling in the Earth-Atmosphere system with DART model </a> <i>Remote Sensing of Environment , 2013, 139, 149 - 170</i> Elay Grau & J.-P. Gastellu-Etchegorry </div> <div class="document"> <a href="http://www.sciencedirect.com/science/article/pii/S0034425714004568" target="_blank">Advanced radiometry measurements and Earth science applications with the Airborne Prism Experiment (APEX) </a> <i>Remote Sensing of Environment , 2015, 158, 207 - 219</i> Schaepman, M. E.; Jehle, M.; Hueni, A.; D'Odorico, P.; Damm, A.; Weyermann, J.; Schneider, F. D.; Laurent, V.; Popp, C.; Seidel, F. C.; Lenhard, K.; Gege, P.; Kuechler, C.; Brazile, J.; Kohler, P.; Vos, L. D.; Meuleman, K.; Meynart, R.; Schlaepfer, D.; Kneubuehler, M. & Itten, K. I. 
</div> <h2>Presentation</h2> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegetationLaboratory_ProjectSummary.pdf" target="_blank"> Project Summary </a> </div> <h2>Documents</h2> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegLab_FinalReport.pdf" target="_blank"> Final Report </a> </div> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegLab_FP.pdf" target="_blank"> Field Protocol </a> </div> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegLab_EOD.pdf" target="_blank"> Earth Observation data sets description</a> </div> <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/documents/3DVegLab_ISD.pdf" target="_blank"> In-situ dataset description</a> </div> ''' pagesites = ''' <style type="text/css" media="screen, print, projection"> .menu a[href*="sites"] { color: #C27638 !important; border-bottom: 3px solid #C27638; !important; } </style> <h2>Site Download</h2> <form enctype="text/html" method="post" action="http://www.etc.ch/bin/cgiwrap/jason/registration"> Please provide your email address so that we may keep you informed of the latest updates to 3D Vegetation Lab activities.<br><br> <input type="email" name="regemail" placeholder="E-mail" required> <input type="submit" value="Go to Download Page &#9654;"> <input type="hidden" name="cmd" value="register"> </form> ''' pageeod = ''' <style type="text/css" media="screen, print, projection"> .menu a[href*="eod"] { color: #C27638 !important; border-bottom: 3px solid #C27638; !important; } </style> <h2>Earth Observation Data</h2> <figure class="abstract"> <img alt="EO Data Scales" src= "http://www.geo.uzh.ch/microsite/3dveglab/graphics/eod_scales.png" /> </figure> <h3>Description</h3> The project gathered a large number of EO datasets of the two different sites of the project which vary in their spatial and spectral characteristics. 
The aim is to facilitate cross- and up-scaling using the RT models of the toolbox. A subset of the EO data (~30MB) can be downloaded using the link below. If you are interested in the full data set (>70GB), please contact the PI of the project. <div class="document"> <a href= "http://www.geo.uzh.ch/microsite/3dveglab/eod/EOD_Laegern.zip" target="_blank"> Earth Observation Dataset </a> </div> ''' pagesoftware = ''' <style type="text/css" media="screen, print, projection"> .menu a[href*="software"] { color: #C27638 !important; border-bottom: 3px solid #C27638; !important; } </style> <h2>BEAM toolkit plugin</h2> <img alt="Beam 3dveglab plugin" src="http://www.geo.uzh.ch/microsite/3dveglab/graphics/vlab-screenshot.png" /> <blockquote> An <a href="https://github.com/netceteragroup/esa-beam">integrated plugin module</a> is available for version 4.11 of the <a href="http://www.brockmann-consult.de/cms/web/beam/">ESA BEAM Earth Observation Toolbox and Development Platform</a> <br> <b>Note:</b> This software plugin is functional <i><b>technically</b>, and was tested to work with both librat and DART under Windows and UNIX environments. The use of a 64bit operating system is recommended. More details on the testing and recommendations for running the toolbox examples can be found in our <a href= "http://www.geo.uzh.ch/microsite/3dveglab/software/3DVegLab_Toolbox_TestingProtocol.pdf" target="_blank">testing protocol </a> </i> </blockquote> <h3>Binary Installation</h3>Binary installation of the 3D Vegetation Lab plugin is automated by a command line Java installer (details below) which does the following: <ul> <li>copy (or replace) the plugin jar into BEAM's <tt>${BEAMHOME}/beam-4.11/modules</tt></li> <li>first-time batch run to install into BEAM's <tt>${HOME}/.beam/beam-vlab/auxdata/beam-vlab</tt></li> <li>fetch/unpack dependent software (e.g. 
DART, librat, libRadtran) into BEAM's <tt>${HOME}/.beam/beam-vlab/auxdata/beam-vlab</tt></li> <li>create command line wrappers for batch operation into BEAM's <tt>${BEAMHOME}/beam-4.11/bin/</tt></li> </ul> <h3>Binary Installation (windows)</h3> <b>Two pre-install steps:</b><br> 1. Visit the <a href="http://www.brockmann-consult.de/cms/web/beam/dlsurvey?p_p_id=downloadportlet_WAR_beamdownloadportlet10&amp;what=software/beam/4.11/beam_4.11_win32_installer.exe">windows 32-bit BEAM installer page</a> and download into your <b><tt>Downloads</tt></b> folder<br> 2. Save our <a href="http://www.geo.uzh.ch/microsite/3dveglab/software/3DVegLabInstaller.jar">3DVegLab plugin installer jar</a> file in your <b><tt>Downloads</tt></b> folder<br> <pre> rem press Windows-R to get the "run" prompt, then type "cmd" to get a shell cd %HOMEDRIVE%%HOMEPATH%\Downloads rem <b>Note</b>: when prompted, we suggest <b>C:\\data\\Program Files (x86)\\beam-4.11</b> rem because 3DVeglabInstaller.jar <i>will fail if Administrator access is needed</i> beam_4.11_win32_installer.exe move 3DVegLabInstaller.jar "C:\\data\\Program Files (x86)\\beam-4.11\\bin" cd /d "C:\\data\\Program Files (x86)\\beam-4.11\\bin" ..\\jre\\bin\\java -jar 3DVegLabInstaller.jar </pre> <h3>Binary Installation (linux)</h3> <b>Two pre-install steps:</b><br> 1. Visit the <a href="http://www.brockmann-consult.de/cms/web/beam/dlsurvey?p_p_id=downloadportlet_WAR_beamdownloadportlet10&amp;what=software/beam/4.11/beam_4.11_linux64_installer.sh">linux 64-bit BEAM installer page</a> and download into your <b><tt>Downloads</tt></b> folder<br> 2. 
Save our <a href="http://www.geo.uzh.ch/microsite/3dveglab/software/3DVegLabInstaller.jar">3DVegLab plugin installer jar</a> file in your <b><tt>Downloads</tt></b> folder<br> <pre> cd ${HOME}/Downloads sh beam_4.11_linux64_installer.sh mv 3DVegLabInstaller.jar ${HOME}/beam-4.11/bin cd ${HOME}/beam-4.11/bin ../jre/bin/java -jar 3DVegLabInstaller.jar </pre> <h3>Running the toolkit</h3> Once you have started BEAM (<b><tt>visat</tt></b>), click <b><tt>Tools/3D Vegetation Lab Processor...</tt></b> ''' pages = ( ('index.html', pageindex), ('consortium/index.html', pageconsortium), ('people/index.html', pagepeople), ('documents/index.html', pagedocuments), ('sites/index.html', pagesites), ('eod/index.html', pageeod), ('software/index.html', pagesoftware) ) class UTIL: def mkdirs(path): import sys if sys.platform.startswith('java'): from java.io import File if not File(path).isDirectory(): if not File(path).mkdirs(): raise RuntimeError('failed to mkdir %s' % path) else: import os try: os.stat(path) except: os.makedirs(path) mkdirs = staticmethod(mkdirs) # # generate all of the html pages # for page in pages: (fname, pg) = page idx = fname.find('/') if (idx > -1): dirs = fname[0:idx] UTIL.mkdirs(dirs) fp = open(fname, 'w') fp.write(template[1:] % (baseurl, pg[1:-1])) fp.close()
netceteragroup/esa-beam
beam-3dveglab-vlab/src/main/scripts/mk_microsite.py
Python
gpl-3.0
26,138
[ "VisIt" ]
33da724e6c5b4643e69762eeb59ff6f3341868eac6a16c7ec792fefdb6ff71cc
#!/usr/bin/env python import ujson as json import logging import zmq import cif.hunter from cifsdk.msg import Msg from cifsdk.client.zeromq import ZMQ as Client from cif.constants import HUNTER_ADDR, HUNTER_SINK_ADDR from csirtg_indicator import Indicator from csirtg_indicator.exceptions import InvalidIndicator import multiprocessing import os logger = logging.getLogger(__name__) SNDTIMEO = 15000 ZMQ_HWM = 1000000 EXCLUDE = os.environ.get('CIF_HUNTER_EXCLUDE', None) HUNTER_ADVANCED = os.getenv('CIF_HUNTER_ADVANCED', 0) HUNTER_MIN_CONFIDENCE = 4 HUNTER_RECURSION = os.getenv('CIF_HUNTER_RECURSION', 0) TRACE = os.environ.get('CIF_HUNTER_TRACE', False) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) if TRACE in [1, '1']: logger.setLevel(logging.DEBUG) class Hunter(multiprocessing.Process): def __enter__(self): return self def __exit__(self, type, value, traceback): return self def __init__(self, remote=HUNTER_ADDR, token=None): multiprocessing.Process.__init__(self) self.hunters = remote self.router = HUNTER_SINK_ADDR self.token = token self.exit = multiprocessing.Event() self.exclude = {} if EXCLUDE: for e in EXCLUDE.split(','): provider, tag = e.split(':') if not self.exclude.get(provider): self.exclude[provider] = set() logger.debug('setting hunter to skip: {}/{}'.format(provider, tag)) self.exclude[provider].add(tag) def _load_plugins(self): import pkgutil logger.debug('loading plugins...') plugins = [] for loader, modname, is_pkg in pkgutil.iter_modules(cif.hunter.__path__, 'cif.hunter.'): p = loader.find_module(modname).load_module(modname) plugins.append(p.Plugin()) logger.debug('plugin loaded: {}'.format(modname)) return plugins def terminate(self): self.exit.set() def start(self): router = Client(remote=self.router, token=self.token, nowait=True, autoclose=False) plugins = self._load_plugins() socket = zmq.Context().socket(zmq.PULL) socket.SNDTIMEO = SNDTIMEO socket.set_hwm(ZMQ_HWM) logger.debug('connecting to {}'.format(self.hunters)) 
socket.connect(self.hunters) logger.debug('starting hunter') poller = zmq.Poller() poller.register(socket, zmq.POLLIN) while not self.exit.is_set(): try: s = dict(poller.poll(1000)) except SystemExit or KeyboardInterrupt: break if socket not in s: continue id, token, mtype, data = Msg().recv(socket) data = json.loads(data) if isinstance(data, dict): if not data.get('indicator'): continue if not data.get('itype'): try: data = Indicator( indicator=data['indicator'], tags='search', confidence=10, group='everyone', tlp='amber', ).__dict__() except InvalidIndicator: logger.debug('skipping invalid indicator: {}'.format(data['indicator'])) continue if not data.get('tags'): data['tags'] = [] data = [data] token = json.loads(token) for d in data: d = Indicator(**d) if d.confidence < HUNTER_MIN_CONFIDENCE: continue # prevent hunter recursion if disabled if not HUNTER_RECURSION and d.tags and 'hunter' in d.tags: continue if d.indicator in ["", 'localhost', 'example.com']: continue if self.exclude.get(d.provider): for t in d.tags: if t in self.exclude[d.provider]: logger.debug('skipping: {}'.format(d.indicator)) continue for p in plugins: if p.is_advanced: if not HUNTER_ADVANCED: continue try: p.process(i=d, router=router, user_token=token) except Exception as e: logger.error(e) logger.error('[{}] giving up on: {}'.format(p, d))
csirtgadgets/bearded-avenger
cif/hunter/__init__.py
Python
mpl-2.0
4,715
[ "Amber" ]
da3ee61fd64eba19e1864444c52700839f73bcc13f1e9fc48be48f4f666e40b9
#!/usr/bin/env python import random import itertools import rospy from visualization_msgs.msg import MarkerArray, Marker from geometry_msgs.msg import Point, Quaternion from std_msgs.msg import UInt8, Float64 import numpy as np import math from sklearn.cluster import KMeans, DBSCAN from sklearn import svm from sklearn.linear_model import LinearRegression from move_base_loiter import Loiter from move_base_waypoint import MoveTo from nav_msgs.msg import Odometry class Pinger(object): """ find coordinate of totem for task1 """ red_list, green_list, white_list, black_list = list(), list(), list(), list() MAX_LENS = 20 # actually 21 map_dim = [[0, 40], [0, 40]] pinger_list = list() pinger_center = list() red_center, green_center, black_center, white_center = list(), list(), list(), list() x0, y0, yaw0 = 0, 0, 0 initial_position = list() def __init__(self, nodename="pinger_planner"): rospy.init_node(nodename) rospy.on_shutdown(self.shutdown) self.rate = rospy.get_param("~rate", 1) self.kmeans = KMeans(n_clusters=2) self.ocsvm = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) self.moveto = MoveTo("moveto", is_newnode=False, target=None, mode=1, mode_param=1, is_relative=False) self.loiter = Loiter("loiter", is_newnode=False, target=None, is_relative=False) # Subscribe to marker array publisher self.odom_received = False rospy.Subscriber("odometry/filtered/global", Odometry, self.odom_callback, queue_size=None) while not self.odom_received: pass rospy.Subscriber("filtered_marker_array", MarkerArray, self.markerarray_callback, queue_size=10) # rospy.Subscriber("pinger", Float64, self.pinger_callback, queue_size=10) rospy.Subscriber("pinger", UInt8, self.pinger_callback, queue_size=10) self.pinger_threshold = 65 self.entry_distance = 2 self.exited = False self.gate_totem_find = False self.pinger_find = False self.center_gate = list() # later need to fill in self.entry_gate = list() # later need to fill in self.exit_gate = list() # later need to fill in self.hold_loiter = 
False self.hold_mv = False # while not self.x0: self.initial_position = [self.x0, self.y0] self.planner() def planner(self): """ find the totems except black, line all of them, calculate the center, then find the gate entrance point, use the point to station keep and if it has pinger, enter and find the black totem, do a loiter on the totem and go back from the pinger's gate """ r = rospy.Rate(self.rate) self.loiter_target = list() self.mv_target = list() while not self.gate_totem_find: if not self.red_center or not self.green_center or len(self.white_center) < 2: print "a" self.moveto.respawn(self.random_walk("before_line")) else: print "b" self.moveto.respawn(self.random_walk("near_line")) r.sleep() else: # find the gate totems # # get the gate line, done immediatedly after gate totem find print "gate totem find" while not self.pinger_find: print "go along line" self.moveto.respawn(self.random_walk("along_line")) r.sleep() else: print "pinger find" # find the entry point who has pinger self.locate_pinger_gate() print "enter gate" self.moveto.respawn(self.exit_gate) self.moveto.respawn(self.center_gate) print self.entry_gate self.moveto.respawn(self.entry_gate) while not self.black_totem_find: # need to random walk after line print "find black" self.moveto.respawn(self.random_walk("after_line")) r.sleep() else: # find the black totem # loiter the black totem print "loiter" print self.black_center self.loiter.respawn(self.black_center) # followed by exit the gate print "exit" self.moveto.respawn(self.entry_gate) self.moveto.respawn(self.center_gate) print self.exit_gate self.moveto.respawn(self.exit_gate) # exit complete print "complete" def odom_callback(self, msg): """ call back to subscribe, get odometry data: pose and orientation of the current boat, suffix 0 is for origin """ self.x0 = msg.pose.pose.position.x self.y0 = msg.pose.pose.position.y self.odom_received = True def pinger_callback(self, msg): """ get pinger information """ # if msg.data > 
self.pinger_threshold if msg.data == 1: self.pinger_list.append([self.x0, self.y0]) self.find_pinger_center() def find_pinger_center(self): if len(self.pinger_list) >= self.MAX_LENS: self.pinger_center = self.one_class_svm(self.pinger_list) self.pinger_find = True else: self.pinger_find = False print "pinger not find" def markerarray_callback(self, msg): """ calculate average over accumulate """ # accumulate data for i in range(len(msg.markers)): if msg.markers[i].id == 0: # red if len(self.red_list) > self.MAX_LENS: self.red_list.pop(0) self.red_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) elif msg.markers[i].id == 1: # green if len(self.green_list) > self.MAX_LENS: self.green_list.pop(0) self.green_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) elif msg.markers[i].id == 4: # white if len(self.white_list) > 2 * self.MAX_LENS: self.white_list.pop(0) self.white_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) elif msg.markers[i].id == 3: # black if len(self.black_list) > self.MAX_LENS: self.black_list.pop(0) self.black_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) # print self.red_list, self.white_list, self.green_list self.find_gateline() def find_gateline(self): # define regions for before, after and along the line # find the center if len(self.red_list) >= self.MAX_LENS: self.red_center = self.one_class_svm(self.red_list) if len(self.green_list) >= self.MAX_LENS: self.green_center = self.one_class_svm(self.green_list) if len(self.black_list) >= self.MAX_LENS: self.black_center = self.one_class_svm(self.black_list) self.black_totem_find = True # for planner if len(self.white_list) >= 2 * self.MAX_LENS: self.kmeans.fit(self.white_list) self.white_center = self.kmeans.cluster_centers_ # print self.red_center, self.white_center, self.green_center # find gate entrance if self.red_center != [] and len(self.white_center) == 2 and self.green_center 
!= []: # all gate detected coordinates = np.concatenate(([self.red_center], self.white_center, [self.green_center])) coord_x, coord_y = coordinates[:,0], coordinates[:,1] self.gateline = LinearRegression() # least square to get the gate line self.gateline.fit(coord_x.reshape((coord_x.shape[0],1)), coord_y) # k = self.gateline.coef_, b = self.gateline.intercept_ # find the gate entry points # find the before the line sign: # "1" is the area where before line # "-1" is the area where after line self.before_line_sign = np.sign(self.gateline.predict([self.initial_position[0]]) - self.initial_position[1]) self.find_gate_entry() self.gate_totem_find = True elif self.red_list or self.green_list or self.white_list: # use currently available data coordinates = [[self.x0, self.y0]] if self.red_list: coordinates = np.concatenate((coordinates, self.red_list)) if self.white_list: coordinates = np.concatenate((coordinates, self.white_list)) if self.green_list: coordinates = np.concatenate((coordinates, self.green_list)) coord_x, coord_y = coordinates[:,0], coordinates[:,1] self.roughline = LinearRegression() # least square to get the gate line self.roughline.fit(coord_x.reshape((coord_x.shape[0],1)), coord_y) self.before_roughline_sign = np.sign(self.roughline.predict([self.initial_position[0]]) - self.initial_position[1]) else: # no data pass def find_gate_entry(self): # this function will be called when gate_totem_find and pinger_find # find red and white separations if self.distance(self.red_center, self.white_center[0]) < self.distance(self.red_center, self.white_center[1]): self.rwc = [np.mean([self.white_center[0,0], self.red_center[0]]), np.mean([self.white_center[0,1], self.red_center[1]])] else: self.rwc = [np.mean([self.white_center[1,0], self.red_center[0]]), np.mean([self.white_center[1,1], self.red_center[1]])] if self.distance(self.green_center, self.white_center[0]) < self.distance(self.green_center, self.white_center[1]): self.gwc = 
[np.mean([self.white_center[0,0], self.green_center[0]]), np.mean([self.white_center[0,1], self.green_center[1]])] else: self.gwc = [np.mean([self.white_center[1,0], self.green_center[0]]), np.mean([self.white_center[1,1], self.green_center[1]])] self.wwc = [np.mean([self.white_center[1,0], self.white_center[0,0]]), np.mean([self.white_center[1,1], self.white_center[0,1]])] self.rwl, self.wwl, self.gwl, self.rwlm, self.wwlm, self.gwlm = self.find_listen_point() def locate_pinger_gate(self): # find the gate now # self.find_pinger_center() # find pinger center # shortest distance to the identified gate center print "pinger center", self.pinger_center print "rwl", self.rwl pinger_to_entry_distance = [self.distance(self.pinger_center, self.rwl), self.distance(self.pinger_center, self.wwl), self.distance(self.pinger_center, self.gwl)] shortest_d = np.argmin(pinger_to_entry_distance) # entry_gate identified # must be inside # exit gate must be outside self.center_gate = [self.rwc, self.wwc, self.gwc][shortest_d] + [0] self.entry_gate = [self.rwlm, self.wwlm, self.gwlm][shortest_d] + [0] self.exit_gate = [self.rwl, self.wwl, self.gwl][shortest_d] + [0] def find_listen_point(self): # find pinger listening point and mirrored point # listen point is for detecting pinger, and mirrored point is to supply the direction # first is to find the two point that is with d to the gate entry theta = -1.0 / self.gateline.coef_ rw = [[self.rwc[0] + self.entry_distance * math.cos(theta), self.rwc[1] + self.entry_distance * math.sin(theta)], [self.rwc[0] - self.entry_distance * math.cos(theta), self.rwc[1] - self.entry_distance * math.sin(theta)]] ww = [[self.wwc[0] + self.entry_distance * math.cos(theta), self.wwc[1] + self.entry_distance * math.sin(theta)], [self.wwc[0] - self.entry_distance * math.cos(theta), self.wwc[1] - self.entry_distance * math.sin(theta)]] gw = [[self.gwc[0] + self.entry_distance * math.cos(theta), self.gwc[1] + self.entry_distance * math.sin(theta)], [self.gwc[0] 
- self.entry_distance * math.cos(theta), self.gwc[1] - self.entry_distance * math.sin(theta)]] # l is listening point, lm is the mirrored point if self.before_line_sign * (self.gateline.predict([rw[0][0]]) - rw[0][1]) > 0: rwl = rw[0] rwlm = rw[1] else: rwl = rw[1] rwlm = rw[0] if self.before_line_sign * (self.gateline.predict([gw[0][0]]) - gw[0][1]) > 0: gwl = gw[0] gwlm = gw[1] else: gwl = gw[1] gwlm = gw[0] if self.before_line_sign * (self.gateline.predict([ww[0][0]]) - ww[0][1]) > 0: wwl = ww[0] wwlm = ww[1] else: wwl = ww[1] wwlm = ww[0] return rwl, wwl, gwl, rwlm, wwlm, gwlm def distance(self, a, b): return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) def one_class_svm(self, data_list): """ return support vector and thus cluster center """ data_list = np.array(data_list) self.ocsvm.fit(data_list) sv = self.ocsvm.support_vectors_ # find the sv's centroid, assume only one cluster. return [np.mean(sv[:,0]), np.mean(sv[:,1])] # return (np.median(sv[:,0]), np.median(sv[:,1])) def update_hold_moveto(self, hold_mv): self.hold_mv = hold_mv def update_hold_loiter(self, hold_loiter): self.hold_loiter = hold_loiter def random_walk(self, style): """ create random walk points and avoid valid centers """ delta_y = 3 target = None if style == "before_line": # this time no gate data is known # do a gaussian distribution with center be the boat's current position and # sigma to be 5 target = [random.gauss(self.x0, 5), random.gauss(self.y0, 5)] # while not target: # if np.min(self.map_dim[0]) < candidate_target[0] < np.max(self.map_dim[0]) and\ # np.min(self.map_dim[1]) < candidate_target[1] < np.max(self.map_dim[0]): # # it is after the gateline # target = candidate_target # else: # target = None elif style == "near_line": # gate data partially known, need to go around the line x_range = range(np.min(self.map_dim[0]), np.max(self.map_dim[0]), 5) y_estimate = [self.roughline.predict(x) - delta_y * self.before_roughline_sign for x in x_range] choices_idx = 
range(len(x_range)) candidate_target_idx = random.choice(choices_idx) while not target: if np.min(self.map_dim[1]) < y_estimate[candidate_target_idx] < np.max(self.map_dim[1]): # it is after the gateline target = [x_range(candidate_target_idx), y_estimate(candidate_target_idx)] else: target = None elif style == "along_line": # gate data known, need to go to the three listener point target = random.choice([self.rwl, self.wwl, self.gwl]) elif style == "after_gate": # do a uniform distribution by grid search x_range = range(np.min(self.map_dim[0]), np.max(self.map_dim[0]), 5) y_range = range(np.min(self.map_dim[1]), np.max(self.map_dim[1]), 5) grid = list(itertools.product(x_range, y_range)) # filter out those who is before the gate line while not target: candidate_target = random.choice(grid) if (self.gateline.predict([candidate_target[0]]) - candidate_target[1]) * self.before_line_sign < 0: # it is after the gateline target = candidate_target else: target = None return target + [0] def shutdown(self): pass if __name__ == "__main__": try: totem=Pinger("pinger_planner") except rospy.ROSInterruptException: pass
phamngtuananh/Singaboat_RobotX2016
robotx_nav/nodes/pinger_planner.py
Python
gpl-3.0
16,399
[ "Gaussian" ]
8edde7be48d905a37035ca8d1d06f731fa2f43042106b853bbcbc5b874f996a8
# Lint as: python3 """A high level modeling interface for jax-based statistical + mechanistic models.""" import collections import dataclasses import functools import itertools from typing import Callable import jax import jax.numpy as jnp import numpy as np import xarray import tensorflow_probability tfp = tensorflow_probability.substrates.jax tfd = tfp.distributions from epi_forecast_stat_mech import data_model # pylint: disable=g-bad-import-order from epi_forecast_stat_mech import estimator_base # pylint: disable=g-bad-import-order from epi_forecast_stat_mech import mask_time # pylint: disable=g-bad-import-order from epi_forecast_stat_mech import optim_lib from epi_forecast_stat_mech.mechanistic_models import mechanistic_models # pylint: disable=g-bad-import-order from epi_forecast_stat_mech.mechanistic_models import observables # pylint: disable=g-bad-import-order from epi_forecast_stat_mech.mechanistic_models import predict_lib # pylint: disable=g-bad-import-order from epi_forecast_stat_mech.statistical_models import base as stat_base # pylint: disable=g-bad-import-order from epi_forecast_stat_mech.statistical_models import network_models # pylint: disable=g-bad-import-order from epi_forecast_stat_mech.statistical_models import no_stat_model # pylint: disable=g-bad-import-order from epi_forecast_stat_mech.statistical_models import probability LogLikelihoods = collections.namedtuple( "LogLikelihoods", ["stat_log_prior", "mech_log_prior", "mech_log_likelihood", "stat_log_likelihood"]) LEARNING_RATE_DEFAULT = 5E-4 @dataclasses.dataclass class StatMechEstimator(estimator_base.Estimator): """A place-holder model that uses a mixed statistical/mechanistic approach.""" train_steps: int stat_model: stat_base.StatisticalModel = dataclasses.field( default_factory=network_models.NormalDistributionModel) mech_model: mechanistic_models.MechanisticModel = dataclasses.field( default_factory=mechanistic_models.ViboudChowellModel) fused_train_steps: int = 100 # TODO(mcoram): 
Resolve whether the "30" is deprecated hack or still useful. time_mask_fn: Callable[..., np.array] = functools.partial( mask_time.make_mask, min_value=30) preprocess_fn: Callable[..., np.array] = lambda x: x fit_seed: int = 42 learning_rate: float = LEARNING_RATE_DEFAULT observable_choice: observables.Observables = dataclasses.field( default_factory=lambda: observables.ObserveSpecified(["log_K"])) def _log_likelihoods( self, params, epidemics, covariates, mech_model, stat_model ): """Computes log-likelihood of the model parameters given epidemics data. The log likelihood computed here consists of two terms. First represents the likelihood of the observed epidemics trajectory under mechanistic model and the second represents the likelihood of the infered parameters given the covariates. In addition both terms include prior assumptions on the parameters of the models. Args: params: tuple of parameters for statistical and mechanistic models. epidemics: named tuple containing observed epidemics trajectory. covariates: array representing covariates for each location. mech_model: mechanistic model. stat_model: statistical model. Returns: namedtuple containing components of the negative log-likelihood. 
""" stat_params, mech_params = params statistical_log_prior = stat_model.log_prior(stat_params) mechanistic_log_prior = jax.vmap(mech_model.log_prior)(mech_params) epidemic_observables_fn = jax.vmap(self.observable_choice.observables, [None, 0, 0]) epidemic_observables = epidemic_observables_fn(mech_model, mech_params, epidemics) statistical_log_likelihood = stat_model.log_likelihood( stat_params, covariates, epidemic_observables) mech_log_likelihood_fn = jax.vmap(mech_model.log_likelihood) mechanistic_log_likelihood = mech_log_likelihood_fn(mech_params, epidemics) return LogLikelihoods( stat_log_prior=statistical_log_prior, mech_log_prior=mechanistic_log_prior, mech_log_likelihood=mechanistic_log_likelihood, stat_log_likelihood=statistical_log_likelihood) def fit(self, data): seed = self.fit_seed rng = jax.random.PRNGKey(seed) mech_rng, stat_rng = jax.random.split(rng, 2) data_model.validate_data_for_fit(data) data = self.preprocess_fn(data) data_model.validate_data_for_fit(data) data["total"] = ( ("location", "time",), np.cumsum( data.new_infections.transpose("location", "time").values, -1)) self.data = data static_covariates = data.static_covariates.transpose( "location", "static_covariate") self.covariates = covariates = static_covariates.values self.epidemics = epidemics = ( mechanistic_models.pack_epidemics_record_tuple(data)) self.time_mask = self.time_mask_fn(data) mech_params = mechanistic_models.initialize_mech_model_stack( mech_rng, self.mech_model, data, epidemics) epidemic_observables_fn = jax.vmap(self.observable_choice.observables, [None, 0, 0]) epidemic_observables = epidemic_observables_fn(self.mech_model, mech_params, epidemics) # Statistical model initialization stat_params = self.stat_model.init_parameters(stat_rng, covariates, epidemic_observables) init_params = (stat_params, mech_params) def negative_log_prob(params): log_likelihoods = self._log_likelihoods( params, epidemics, covariates, self.mech_model, self.stat_model) mech_log_likelihood = 
log_likelihoods.mech_log_likelihood mech_log_likelihood = jnp.where(self.time_mask, mech_log_likelihood, 0.) log_likelihoods = log_likelihoods._replace( mech_log_likelihood=mech_log_likelihood) return -1. * sum(jax.tree_leaves(jax.tree_map(jnp.sum, log_likelihoods))) self.params_ = optim_lib.adam_optimize( negative_log_prob, init_params, train_steps=self.train_steps, learning_rate=self.learning_rate, fused_train_steps=self.fused_train_steps) self._is_trained = True return self def _check_fitted(self): if not hasattr(self, "params_"): raise AttributeError("`fit` must be called before `predict`.") def predict(self, test_data, num_samples, seed=0): self._check_fitted() rng = jax.random.PRNGKey(seed) encoded_mech_params = self.mech_params_for_jax_code dynamic_covariates = predict_lib.prepare_dynamic_covariates( self.data, test_data) sample_mech_params_fn = getattr( self, "sample_mech_params_fn", lambda rngkey, num_samples: jnp.swapaxes( jnp.broadcast_to(encoded_mech_params, (num_samples,) + encoded_mech_params.shape), 1, 0)) return predict_lib.simulate_dynamic_predictions( self.mech_model, encoded_mech_params, self.data, self.epidemics, dynamic_covariates, num_samples, rng, sample_mech_params_fn) @property def mech_params(self): self._check_fitted() return predict_lib.mech_params_array(self.data, self.mech_model, self.params_[1]) @property def encoded_mech_params(self): self._check_fitted() return predict_lib.encoded_mech_params_array(self.data, self.mech_model, self.params_[1]) @property def mech_params_for_jax_code(self): return self.encoded_mech_params.values @property def epidemic_observables(self): self._check_fitted() _, mech_params = self.params_ epidemic_observables_fn = jax.vmap(self.observable_choice.observables, [None, 0, 0]) my_observables = epidemic_observables_fn(self.mech_model, mech_params, self.epidemics) return my_observables @property def observables_loc_scale_hat(self): self._check_fitted() stat_params, _ = self.params_ return 
self.stat_model.get_loc_scale(stat_params, self.covariates, self.epidemic_observables) @property def alpha(self): self._check_fitted() if (issubclass(self.stat_model.predict_module, network_models.LinearModule) or issubclass(self.stat_model.predict_module, network_models.PlainLinearModule)): kernel, unused_intercept = self.stat_model.linear_coefficients( self.params_[0]) assert kernel.shape[1] == len(self.epidemic_observables.keys()), ( f"unexpected kernel shape: {kernel.shape[1]} vs " f"{self.epidemic_observables.keys()}") alpha = xarray.DataArray( np.asarray(kernel), dims=("static_covariate", "encoded_param"), coords=dict( static_covariate=self.data.static_covariate, encoded_param=list(self.epidemic_observables.keys()))) return alpha else: raise AttributeError("no alpha method for stat_model: %s" % (self.stat_model.__class__,)) @property def intercept(self): self._check_fitted() if (issubclass(self.stat_model.predict_module, network_models.LinearModule) or issubclass(self.stat_model.predict_module, network_models.PlainLinearModule)): unused_kernel, bias = self.stat_model.linear_coefficients( self.params_[0]) assert bias.shape == (len(self.epidemic_observables.keys()),), ( f"unexpected bias shape: {bias.shape} vs " f"{self.epidemic_observables.keys()}") bias = xarray.DataArray( np.asarray(bias), dims=("encoded_param"), coords=dict( encoded_param=list(self.epidemic_observables.keys()))) return bias else: raise AttributeError("no intercept method for stat_model: %s" % (self.stat_model.__class__,)) def seven_day_time_smooth_helper_(x): if "time" in x.dims and not np.issubdtype(x.dtype, np.datetime64): return x.rolling(time=7, min_periods=4).mean() else: return x def seven_day_time_smooth(data): return xarray.Dataset({ key: seven_day_time_smooth_helper_(x) for key, x in data.data_vars.items() }) def const_covariates(data): del data["static_covariates"] del data["static_covariate"] data["static_covariates"] = xarray.DataArray( np.zeros((data.sizes["location"], 1)), 
dims=["location", "static_covariate"]) return data def get_estimator_dict( train_steps=100000, fused_train_steps=100, fit_seed=42, learning_rate=LEARNING_RATE_DEFAULT, list_of_prior_fns=(probability.log_soft_mixed_laplace_on_kernels,), list_of_mech_models=( mechanistic_models.ViboudChowellModel, mechanistic_models.GaussianModel, mechanistic_models.ViboudChowellModelPseudoLikelihood, mechanistic_models.GaussianModelPseudoLikelihood, mechanistic_models.StepBasedMultiplicativeGrowthModel, mechanistic_models.StepBasedSimpleMultiplicativeGrowthModel, mechanistic_models.StepBasedGeneralizedMultiplicativeGrowthModel, mechanistic_models.StepBasedBaselineSEIRModel, mechanistic_models.ViboudChowellModelPublished, mechanistic_models.TurnerModel), list_of_stat_module=(network_models.LinearModule, network_models.PlainLinearModule, network_models.PerceptronModule, no_stat_model.Null), list_of_time_mask_fn=(functools.partial(mask_time.make_mask, min_value=50), functools.partial( mask_time.make_mask, min_value=1, recent_day_limit=6 * 7), functools.partial( mask_time.make_mask, min_value=1, recent_day_limit=4 * 7)), list_of_preprocess_fn=(lambda x: x, seven_day_time_smooth, const_covariates), list_of_prior_names=("LSML",), list_of_mech_names=("VC", "Gaussian", "VC_PL", "Gaussian_PL", "MultiplicativeGrowth", "SimpleMultiplicativeGrowth", "GeneralizedMultiplicativeGrowth", "BaselineSEIR", "VCPub", "Turner"), list_of_stat_names=("Linear", "PlainLinear", "MLP", "Null"), list_of_observable_choices=(observables.InternalParams(),), list_of_observable_choices_names=("ObsEnc",), list_of_time_mask_fn_names=("50cases", "6wk", "4wk"), list_of_preprocess_fn_names=("Id", "7day", "ConstCov"), list_of_error_model_names=("full", "plugin")): # TODO(mcoram): Resolve whether the time_mask_value of "50" is deprecated # hack or still useful. 
# Create an iterator components_iterator = itertools.product( itertools.product( itertools.product( itertools.product( itertools.product( itertools.product(list_of_prior_fns, list_of_mech_models), list_of_stat_module), list_of_observable_choices), list_of_time_mask_fn), list_of_preprocess_fn), list_of_error_model_names) names_iterator = itertools.product( itertools.product( itertools.product( itertools.product( itertools.product( itertools.product(list_of_prior_names, list_of_mech_names), list_of_stat_names), list_of_observable_choices_names), list_of_time_mask_fn_names), list_of_preprocess_fn_names), list_of_error_model_names) # Combine into one giant dictionary of predictions estimator_dictionary = {} for components, name_components in zip(components_iterator, names_iterator): ((((((prior_fn, mech_model_cls), stat_module), observable_choice), time_mask_fn), preprocess_fn), error_model) = components ((((((prior_name, mech_name), stat_name), observable_choice_name), time_mask_fn_name), preprocess_fn_name), error_model_name) = name_components name_list = [prior_name, mech_name, stat_name] # Check for unhelpful combinations here # TODO(edklein): revisit if we should exclude more ConstCov models if stat_name == "Null" and error_model_name == "plugin": continue if stat_name == "Null" and preprocess_fn_name == "ConstCov": continue # To preserve old names, I'm dropping ObsLogK from the name. if observable_choice_name != "ObsLogK": name_list.append(observable_choice_name) # To preserve old names, I'm dropping 50cases from the name. if time_mask_fn_name != "50cases": name_list.append(time_mask_fn_name) # To preserve old names, I'm dropping Id from the name. 
if preprocess_fn_name != "Id": name_list.append(preprocess_fn_name) if error_model_name != "full": name_list.append(error_model_name) model_name = "_".join(name_list) mech_model = mech_model_cls() if error_model != error_model_name: raise ValueError( f"Expected agreement b/w error_model and error_model_name: " f"{error_model}, {error_model_name}" ) if stat_name == "Null": # there must be a better way to do this stat_model = stat_module() elif error_model_name == "full": stat_model = network_models.NormalDistributionModel( predict_module=stat_module, log_prior_fn=prior_fn, scale_eps=1E-2, error_model="full") elif error_model_name == "plugin": stat_model = network_models.NormalDistributionModel( predict_module=stat_module, log_prior_fn=prior_fn, scale_eps=mech_model.bottom_scale, error_model="plugin") else: raise ValueError(f"Unexpected error_model_name: {error_model_name}") estimator_dictionary[model_name] = StatMechEstimator( train_steps=train_steps, stat_model=stat_model, mech_model=mech_model, fused_train_steps=fused_train_steps, time_mask_fn=time_mask_fn, preprocess_fn=preprocess_fn, fit_seed=fit_seed, learning_rate=learning_rate, observable_choice=observable_choice) estimator_dictionary["Laplace_VC_Linear_ObsChar1"] = StatMechEstimator( train_steps=train_steps, stat_model=network_models.NormalDistributionModel( predict_module=network_models.LinearModule, log_prior_fn=probability.laplace_prior), mech_model=mechanistic_models.ViboudChowellModel(), fused_train_steps=fused_train_steps, time_mask_fn=functools.partial(mask_time.make_mask, min_value=50), fit_seed=fit_seed, learning_rate=learning_rate, observable_choice=observables.ObserveSpecified([ "log_r", "log_a", "log_characteristic_time", "log_characteristic_height" ])) return estimator_dictionary
HopkinsIDD/EpiForecastStatMech
epi_forecast_stat_mech/stat_mech_estimator.py
Python
apache-2.0
17,310
[ "Gaussian" ]
f0eb45855845fc0ee2fac10ce0e9ed908be8f923c8a08c429d596692dc4c1d92
#! /usr/bin/env python3 import os import sys import stat class default_params: mode_res = 256 output_timestep_size = -1 f_sphere = 0 timestepping_method = 'l_erk' timestepping_order = 1 verbosity=0 timestep_size = 0.001 max_timesteps = 1 normal_mode_analysis = 0 rexi_m = 256 rexi_h = 0.15 rexi_half_poles = 1 sphere_extended_modes = 0 g = 1 # gravity h = 100000 # avg height f = 0.00014584 # coriolis effect r = 6371220 # radius # 3: gaussian breaking dam # 4: geostrophic balance test case bench_id = 4 use_robert_functions = 1 pde_id = 0 nonlinear = 0 viscosity = 0 simtime = 0.001 #math.inf rexi_par = 0 postprocessing = 0 compute_error = 0 def create_job_script(self): content = "#! /bin/bash\n" content += """ BASEDIR="`pwd`" rm -f ./prog_h_* rm -f ./prog_u_* rm -f ./prog_v_* SWEETROOT="../../../" cd "$SWEETROOT" # Always load local software source ./local_software/env_vars.sh || exit 1 make clean || exit 1 """ # content += """ #SCONS="scons --program=swe_sphere --gui=disable --plane-spectral-space=disable --sphere-spectral-space=enable --mode=release """+("--threading=off --rexi-thread-parallel-sum=enable" if p.rexi_par else "disable")+'"'+""" content += """ SCONS="scons --program=swe_sphere --gui=disable --plane-spectral-space=disable --sphere-spectral-space=enable --mode=release --threading=off" echo "$SCONS" $SCONS || exit 1 """ content += """ cd "$BASEDIR" """ content += 'EXEC="$SWEETROOT/build/swe_sphere_spherespectral_spheredealiasing_libfft_gnu_release' content += ' -g '+str(self.g) content += ' -H '+str(self.h) content += ' -f '+str(self.f) content += ' -F '+str(self.f_sphere) content += ' -a '+str(self.r) content += ' -M '+str(self.space_res_spectral) content += ' --pde-id '+str(self.pde_id) content += ' -s '+str(self.bench_id) content += ' -v '+str(self.verbosity) content += ' -C '+str(-self.timestep_size) content += ' -T '+str(self.max_timesteps) content += ' -o '+str(self.output_timestep_size) content += ' -O -' # deactivate file output content += ' -u 
'+str(self.viscosity) content += ' -t '+str(self.max_simulation_time) content += ' --nonlinear='+str(self.nonlinear) content += ' --timestepping-method='+self.timestepping_method content += ' --timestepping-order='+str(self.timestepping_order) content += ' --normal-mode-analysis-generation='+str(self.normal_mode_analysis) content += ' --rexi-m='+str(self.rexi_m) content += ' --rexi-h='+str(self.rexi_h) content += ' --rexi-half='+str(self.rexi_half_poles) content += ' --use-robert-functions='+str(self.use_robert_functions) content += ' --rexi-ext-modes='+str(self.sphere_extended_modes) content += ' --compute-error='+str(self.compute_error) content += '"' content += "\n" content += """ echo "$EXEC" taskset -c 1 $EXEC || exit 1 """ if self.postprocessing: content += """ ../pp_plot_csv.py prog_h_*.csv ../pp_create_mp4.sh prog_h out_prog_h.mp4 """ return content def create_job_id(self): idstr = '' idstr += '_modes'+str(self.space_res_spectral).zfill(3) # idstr += '_bench'+str(self.bench_id) # idstr += '_nonlin'+str(self.nonlinear) idstr += '_g'+str(self.g) idstr += '_h'+str(self.h) idstr += '_f'+str(self.f) idstr += '_a'+str(self.r) idstr += '_u'+str(self.viscosity) idstr += '_robert'+str(self.use_robert_functions) idstr += '_pdeid'+str(self.pde_id) idstr += '_fsphere'+str(self.f_sphere) idstr += '_C'+str(self.timestep_size).zfill(8) idstr += '_Tn'+str(self.max_timesteps).zfill(3) idstr += '_t'+str(self.max_simulation_time).zfill(8) idstr += '_o'+str(self.output_timestep_size).zfill(8) idstr += '_tsm_'+str(self.timestepping_method) idstr += '_tso'+str(self.timestepping_order) if True: idstr += '_rexim'+str(self.rexi_m).zfill(8) idstr += '_rexih'+str(self.rexi_h) idstr += '_rexihalf'+str(self.rexi_half_poles) idstr += '_rexiextmodes'+str(self.sphere_extended_modes).zfill(2) # idstr += '_rexipar'+str(1 if self.rexi_par else 0) return idstr def gen_script(self, dirname, scriptname): if not os.path.exists(dirname): os.makedirs(dirname) scriptname = 'run.sh' fullpath = 
dirname+'/'+scriptname print("WRITING "+fullpath) script_file = open(fullpath, 'w') script_file.write(self.create_job_script()) script_file.close() st = os.stat(fullpath) os.chmod(fullpath, st.st_mode | stat.S_IEXEC) p = default_params() p.use_robert_functions = 1 p.f_sphere = 0 p.sphere_extended_modes = 2 # # Smaller values lead to no solution for the vort/div formulation # See gaussian_ts_comparison_earth_scale_M16 # This shows that good results for RK can be computed with # RK2 and a time step size of 200 # p.timestep_size = 100 p.max_timesteps_nr = 10 p.max_simulation_time = p.timestep_size*p.max_timesteps for p.space_res_spectral in [64, 128, 256]: #################################### # REXI #################################### if True: p.timestepping_method = 'l_rexi' p.timestepping_order = 0 p.rexi_m = 16 p.gen_script('script'+p.create_job_id(), 'run.sh') p.rexi_m = 0 #################################### # RKn #################################### for i in [1, 2, 3, 4]: p.timestepping_method = 'l_erk' p.timestepping_order = i p.gen_script('script'+p.create_job_id(), 'run.sh') #################################### # LF2 #################################### if True: p.timestepping_method = 'l_lf' p.timestepping_order = 2 p.gen_script('script'+p.create_job_id(), 'run.sh') #################################### # IRK1 #################################### for i in [1]: p.timestepping_method = 'l_irk' p.timestepping_order = i p.gen_script('script'+p.create_job_id(), 'run.sh') #################################### # CN #################################### for i in [2]: p.timestepping_method = 'l_cn' p.timestepping_order = i p.gen_script('script'+p.create_job_id(), 'run.sh')
schreiberx/sweet
benchmarks_sphere/paper_jrn_nla_rexi_linear/sph_rexi_paper_time_to_solution_timestepping_methods/jobs_create.py
Python
mit
5,995
[ "Gaussian" ]
cce15948e094b1c116053b0e881ccf077d331100d4584101eb9bf9f59aeaa8d8
import copy import json import subprocess from os import system import pyrebase import requests import collections from Firefly import aliases, logging, scheduler from Firefly.const import API_ALEXA_VIEW, API_FIREBASE_VIEW, FIREFLY_SECURITY_MONITORING, SOURCE_LOCATION, SOURCE_TIME, TYPE_DEVICE, TYPE_ROUTINE from Firefly.core.service_handler import ServiceConfig, ServicePackage from Firefly.helpers.metadata import EXPORT_UI, FF_ID, HIDDEN_BY_USER, PRIMARY_ACTION from Firefly.helpers.service import Command, Request, Service from Firefly.services.alexa.alexa import process_alexa_request from Firefly.services.api_ai import apiai_command_reply from Firefly.services.firebase.event_logging import EventLogger FIREBASE_LOCATION_STATUS_PATH = 'locationStatus' FIREBASE_DEVICE_VIEWS = 'deviceViews' FIREBASE_DEVICE_STATUS = 'deviceStatus' FIREBASE_DEVICE_SETTINGS_PATH = 'deviceSettings' FIREBASE_HOME_STATUS = 'homeStatus' FIREBASE_COMMAND_REPLY = 'commandReply' FIREBASE_ALIASES = 'aliases' ALEXA_CUSTOM_SKILL_ID = 'firefly-alexa' # This is the action when status messages are updated STATUS_MESSAGE_UPDATED = { 'status_message': 'updated' } def internet_up(): return system("ping -c 1 8.8.8.8") == 0 TITLE = 'Firebase Service for Firefly' AUTHOR = 'Zachary Priddy me@zpriddy.com' SERVICE_ID = 'service_firebase' COMMANDS = ['push', 'refresh', 'get_api_id'] REQUESTS = [] SECTION = 'FIREBASE' MAX_EVENTS = 1000 # TODO: Setup function should get the config from the service config file. If the # required params are not in the config file then it should log and error message # and abort install # TODO: push this data to location weather info.. 
this could be useful def Setup(firefly, package, alias, ff_id, service_package: ServicePackage, config: ServiceConfig, **kwargs): logging.info('Installing Firebase Service') firebase = Firebase(firefly, alias, ff_id, service_package, config, **kwargs) firefly.install_component(firebase) return True class Firebase(Service): def __init__(self, firefly, alias, ff_id, service_package: ServicePackage, config: ServiceConfig, **kwargs): # TODO: Fix this package = service_package.package super().__init__(firefly, SERVICE_ID, package, TITLE, AUTHOR, COMMANDS, REQUESTS) logging.info('[FIREBASE] setting up firebase') self.service_config = config self.api_key = config.api_key self.auth_domain = config.auth_domain self.database_url = config.database_url self.storage_bucket = config.storage_bucket self.email = config.email self.password = config.password self.facebook = config.facebook self.home_id = config.home_id # Create the event logger self.event_logger = EventLogger(self) # Event history will hold the last 1000 events and overwrite existing events when buffer is full self.event_history = collections.deque(maxlen=MAX_EVENTS) self.events_since_clear = 0 self.add_command('push', self.push) self.add_command('refresh', self.refresh_all) self.add_command('get_api_id', self.get_api_id) self.config = { "apiKey": self.api_key, "authDomain": self.auth_domain, "databaseURL": self.database_url, "storageBucket": self.storage_bucket } logging.info('[FIREBASE] logging into firebase') self.firebase = pyrebase.initialize_app(self.config) # Get a reference to the auth service self.auth = self.firebase.auth() # Log the user in self.user = self.auth.sign_in_with_email_and_password(self.email, self.password) self.uid = self.user['localId'] self.id_token = self.user['idToken'] # Get a reference to the database service self.db = self.firebase.database() if self.home_id is None: self.register_home() scheduler.runEveryM(30, self.refresh_user) scheduler.runEveryM(20, self.refresh_all) 
scheduler.runInS(30, self.refresh_all) logging.info('[FIREBASE] starting stream') self.stream = self.db.child('homeStatus').child(self.home_id).child('commands').stream(self.command_stream_handler, self.id_token) self.commandReplyStream = self.db.child('homeStatus').child(self.home_id).child('commandReply').stream(self.command_reply, self.id_token) logging.info('[FIREBASE] stream started') def register_home(self): logging.info('Registering Home On Firebase!!!!') register_url = 'https://us-central1-firefly-beta-cdb9d.cloudfunctions.net/registerHome' return_data = requests.post(register_url, data={ 'uid': self.uid }).json() self.home_id = return_data.get('home_id') if self.home_id is None: logging.notify('error registering home') return self.service_config.home_id = self.home_id self.service_config.save() logging.info('Config file for firebase has been updated.') def process_settings(self, message, **kwargs): logging.info('[FIREBASE] PROCESSING SETTINGS: %s' % str(message)) if message.get('notification', {}).get('facebook') is not None: enable = bool(message.get('notification', {}).get('facebook')) self.set_facebook_settings(enable) def set_facebook_settings(self, enable, **kwargs): self.facebook = enable logging.info('[FIREBASE] Enabling/Disabling Facebook. 
%s' % str(enable)) self.service_config.facebook = enable self.service_config.save() if enable: self.send_facebook_notification("Facebook notifications for firefly are now enabled.") else: self.send_facebook_notification("Facebook notifications for firefly are now disabled.") logging.info('Config file for hue has been updated.') def refresh_stream(self): if not internet_up(): logging.error('[FIREBASE REFRESH STREAM] Internet is down') scheduler.runInM(1, self.refresh_stream, 'firebase_internet_down_refresh_stream') return self.stream.close() self.stream = self.db.child('homeStatus').child(self.home_id).child('commands').stream(self.command_stream_handler, self.id_token) self.commandReplyStream.close() self.commandReplyStream = self.db.child('homeStatus').child(self.home_id).child('commandReply').stream(self.command_reply, self.id_token) def command_reply(self, message): data = message['data'] # Take path and split it. we are only going to process top level paths. This should be the clientID. 
raw_path = message['path'] path_list = raw_path[1:].split('/') path_depth = len(path_list) if path_depth > 1 or data is None: logging.debug('[FIREBASE COMMAND REPLY] message was updated or deleted') return path = path_list[0] client_id = path logging.debug('[FIREBASE COMMAND REPLY] processing for client: %s' % client_id) response = {} if path == ALEXA_CUSTOM_SKILL_ID: alexa_request = data['service_alexa'] response = process_alexa_request(self.firefly, alexa_request) if response: logging.debug('[FIREBASE COMMAND REPLY] sending response : %s' % str(response)) self.db.child(FIREBASE_HOME_STATUS).child(self.home_id).child(FIREBASE_COMMAND_REPLY).child(client_id).child('reply').set(response, self.id_token) return # TODO: Remove this after upgrading all cloud functions if not message['data']: return if message['data'].get('reply') is not None or message.get('reply') is not None: return key = message['path'][1:] if 'reply' in key or 'speech' in key: return try: reply = apiai_command_reply(self.firefly, message['data']) self.db.child('homeStatus').child(self.home_id).child('commandReply').child(key).child('reply').set(reply, self.id_token) except Exception as e: print(str(e)) def firebase_send_command(self, ff_id, command): ''' process and send command from firebase commands Args: ff_id: device to send command to command: command to be sent Returns: ''' logging.info('[FIREBASE SEND COMMAND] : %s:%s' % (str(ff_id), str(command))) # Location is a special case. 
if ff_id == 'location': if type(command) is not dict: return for command_string, command_args in command.items(): send_command = Command(ff_id, 'web_api', command_string, **command_args) self.firefly.location.process_command(send_command) return if ff_id == 'settings': self.process_settings(command) return if ff_id == 'system' and command == 'restart': subprocess.run(['bash /opt/firefly_system/Firefly/system_files/restart_firefly.sh'], shell=True) if type(command) is str: send_command = Command(ff_id, 'web_api', command) logging.info('FIREBASE SENDING COMMAND: %s ' % str(send_command)) self.firefly.send_command(send_command) if command == 'delete': scheduler.runInS(10, self.update_device_views, job_id='firebase_refresh') return # TODO Handle install package command # if list(command.keys())[0] == 'install_package': # self.firefly.install_package(**dict(list(command.values())[0])) if type(command) is dict: for command_string, command_args in command.items(): send_command = Command(ff_id, 'web_api', command_string, **command_args) logging.info('FIREBASE SENDING COMMAND: %s ' % str(send_command)) self.firefly.send_command(send_command) return def command_stream_handler(self, message): ''' Handle commands sent from the ui Args: message: message from command stream Returns: ''' try: logging.message('FIREBASE MESSAGE: %s ' % str(message)) # Return if no data if message['data'] is None: return if message['path'] == '/': for ff_id, command in message['data'].items(): self.firebase_send_command(ff_id, command) self.db.child('homeStatus').child(self.home_id).child('commands').child(ff_id).remove(self.id_token) else: ff_id = message['path'][1:] command = message['data'] self.firebase_send_command(ff_id, command) self.db.child('homeStatus').child(self.home_id).child('commands').child(ff_id).remove(self.id_token) except Exception as e: logging.error('Firebase Stream Error: %s' % str(e)) def refresh_all(self, **kwargs): # Hard-coded refresh all device values # TODO use core api 
for this. all_values = {} for ff_id, device in self.firefly.components.items(): try: all_values[ff_id] = device.get_all_request_values(True) except: pass # Nasty json sanitation all_values = scrub(all_values) all_values = json.dumps(all_values) all_values = all_values.replace('null', '') all_values = all_values.replace('#', '') all_values = all_values.replace('$', '') all_values = all_values.replace('/', '_-_') all_values = json.loads(all_values) try: alexa_views = self.get_all_alexa_views('firebase') routines = self.get_routines() # TODO(zpriddy): Remove old views when new UI is done # self.db.child("userAlexa").child(self.uid).child("devices").set(alexa_views, self.id_token) # self.db.child("homeStatus").child(self.home_id).child('devices').update(all_values, self.id_token) self.db.child("homeStatus").child(self.home_id).child('routines').set(routines['config'], self.id_token) # End of old views routine_view = {} for r in routines['view']: routine_view[r.get('ff_id')] = r routine_config = {} for r in routines['config']: routine_config[r.get('ff_id')] = r # Update all devices statuses self.update_all_device_status(overwrite=True) # Update device settings self.update_device_settings() # This is the new location of routine views [/homeStatus/{homeId}/routineViews] self.db.child("homeStatus").child(self.home_id).child('routineViews').set(routine_view, self.id_token) self.db.child("homeStatus").child(self.home_id).child('routineConfigs').set(routine_config, self.id_token) # This is the new location of location status [/homeStatus/{homeId}/locationStatus] self.update_location_status(overwrite=True, update_metadata_timestamp=False) # This is the new location of alexa api data [/homeStatus/{homeId}/alexaAPIView] self.db.child("homeStatus").child(self.home_id).child('alexaAPIViews').set(alexa_views, self.id_token) groups = {} groups_state = {} for ff_id, group in self.firefly.components.items(): if group.type != 'GROUP': continue groups[ff_id] = group.get_metadata() 
groups_state[ff_id] = group.get_all_request_values(True) self.db.child("homeStatus").child(self.home_id).child('groupViews').set(groups, self.id_token) self.db.child("homeStatus").child(self.home_id).child('groupStatus').set(groups_state, self.id_token) self.update_device_views() except Exception as e: logging.notify("Firebase 271: %s" % str(e)) def update_device_settings(self): logging.info('[FIREBASE] updating device settings') device_settings = {} for ff_id, device in self.firefly.components.items(): try: if device.type != TYPE_DEVICE: continue device_settings[ff_id] = device.get_settings_view() except: pass self.set_home_status(FIREBASE_DEVICE_SETTINGS_PATH, device_settings) def update_last_metadata_timestamp(self): ''' Update the lastMetadataUpdate timestamp Returns: ''' self.set_home_status('locationStatus/lastMetadataUpdate', self.firefly.location.now.timestamp()) def set_home_status(self, path, data, retry=True, **kwargs): ''' Function to set homeStatus in firebase Args: path: path from homeStatus/{homeID}/ that will be set. data: data that will be set. Returns: ''' try: self.db.child("homeStatus").child(self.home_id).child(path).set(data, self.id_token) return True except Exception as e: if not retry: return False logging.error('[FIREBASE SET HOME STATUS] ERROR: %s' % str(e)) self.refresh_user() return self.set_home_status(path, data, False) def update_home_status(self, path, data, retry=True, **kwargs): ''' Function to update homeStatus in firebase Args: path: path from homeStatus/{homeID}/ that will be updateed. data: data that will be updateed. 
Returns: ''' try: self.db.child("homeStatus").child(self.home_id).child(path).update(data, self.id_token) return True except Exception as e: if not retry: return False logging.error('[FIREBASE UPDATE HOME STATUS] ERROR: %s' % str(e)) self.refresh_user() return self.update_home_status(path, data, False) def update_location_status(self, overwrite=False, update_metadata_timestamp=False, update_status_message=False, **kwargs): ''' update the location status in firebase. Args: overwrite: if true calls set instead of update. update_metadata_timestamp: also update metadata timestamp. When calling set without updating the timestamp the timestamp will be removed. update_status_message: clear all status messages and inset current status messages **kwargs: Returns: ''' location_status = self.get_location_status() if overwrite: self.set_home_status(FIREBASE_LOCATION_STATUS_PATH, location_status) if update_metadata_timestamp: self.update_last_metadata_timestamp() return if update_status_message: self.set_home_status('%s/statusMessages' % FIREBASE_LOCATION_STATUS_PATH, {}) self.update_home_status(FIREBASE_LOCATION_STATUS_PATH, location_status) def update_security_status(self, status): self.update_home_status('%s/security' % FIREBASE_LOCATION_STATUS_PATH, status) def update_device_min_views(self, device_views, **kwargs): device_min_view = {} for ff_id, device_view in device_views.items(): try: primary_action = device_view['metadata']['primary'] device_min_view[ff_id] = { FF_ID: device_view [FF_ID], 'alias': device_view ['alias'], # TODO: Update this to hidden_by_user or hidden_by_firefly when ready. 
EXPORT_UI: device_view [EXPORT_UI], HIDDEN_BY_USER: device_view[EXPORT_UI], PRIMARY_ACTION: { primary_action: device_view['metadata']['actions'][primary_action] } } except Exception as e: logging.error('[FIREBASE DEVICE MIN VIEW] error: %s' % str(e)) logging.debug('[FIREBASE DEVICE MIN VIEW] setting min view: %s' % str(device_min_view)) self.set_home_status('deviceMinView', device_min_view) def update_device_views(self, **kwargs): ''' Update device views metadata for all devices Args: **kwargs: Returns: ''' logging.info('[FIREBASE DEVICE VIEW UPDATE] updating all device views') device_views = {} devices = self.get_all_component_views('firebase_refresh', filter=TYPE_DEVICE) for device in devices: device_views[device.get(FF_ID, 'unknown')] = device self.set_home_status(FIREBASE_DEVICE_VIEWS, device_views) self.update_device_min_views(device_views) # TODO: Remove this check_all_keys(device_views) self.set_home_status('devices', device_views) self.update_aliases() self.update_last_metadata_timestamp() def update_all_device_status(self, overwrite=False, **kwargs): # TODO use core api for this. 
all_values = {} for ff_id, device in self.firefly.components.items(): try: all_values[ff_id] = device.get_all_request_values(True) except: pass for device, device_view in all_values.items(): try: if 'PARAMS' in device_view.keys(): device_view.pop('PARAMS') if 'RAW_VALUES' in device_view.keys(): device_view.pop('RAW_VALUES') if 'SENSORS' in device_view.keys(): device_view.pop('SENSORS') if 'ZWAVE_VALUES' in device_view.keys(): device_view.pop('ZWAVE_VALUES') except: pass # TODO Remove this check_all_keys(all_values) # self.update_home_status('devices', all_values) if overwrite: self.set_home_status(FIREBASE_DEVICE_STATUS, all_values) return self.update_home_status(FIREBASE_DEVICE_STATUS, all_values) def update_device_status(self, ff_id, action: dict, **kwargs): ''' Update a single device status Args: ff_id: ff_id of the device to update action: the action data to update **kwargs: Returns: ''' # TODO(zpriddy): Find a better way to do this if 'PARAMS' in action.keys(): return if 'RAW_VALUES' in action.keys(): return if 'SENSORS' in action.keys(): return if 'ZWAVE_VALUES' in action.keys(): return path = '%s/%s' % (FIREBASE_DEVICE_STATUS, ff_id) self.update_home_status(path, action) def update_aliases(self, **kwargs): ''' update all device aliases from firefly. 
Args: **kwargs: Returns: ''' self.set_home_status(FIREBASE_ALIASES, aliases.aliases) def get_routines(self): routines = { 'view': [], 'config': [] } for ff_id, d in self.firefly.components.items(): logging.info('[FIREBASE]: getting routine view for: %s-%s' % (ff_id, d.type)) if d.type == TYPE_ROUTINE: logging.info('[FIREBASE]: getting routine view for (2): %s' % ff_id) routines['view'].append(d.export(firebase_view=True)) routines['config'].append(d.export()) return routines def get_component_view(self, ff_id, source): device_request = Request(ff_id, source, API_FIREBASE_VIEW) data = self.firefly.components[device_request.ff_id].request(device_request) return data def get_component_alexa_view(self, ff_id, source): logging.info('[FIREBASE] getting alexa view for %s' % ff_id) device_request = Request(ff_id, source, API_ALEXA_VIEW) data = self.firefly.components[device_request.ff_id].request(device_request) return data def get_all_alexa_views(self, source, filter=[TYPE_DEVICE, TYPE_ROUTINE]): if type(filter) is str: filter = [filter] views = [] for ff_id, device in self.firefly.components.items(): if device.type in filter or filter is None: data = self.get_component_alexa_view(ff_id, source) if data is not None and len(data.get('capabilities')) > 0: views.append(data) return views def get_all_component_views(self, source, filter=None): if type(filter) is str: filter = [filter] views = [] for ff_id, device in self.firefly.components.items(): if device.type in filter or filter is None: data = self.get_component_view(ff_id, source) views.append(data) return views def get_location_status(self, **kwargs): """ Get the location status. 
    Args:
      **kwargs:

    Returns:
      dict of location status
    """
    now = self.firefly.location.now
    return_data = {
      'security': {},
      'time': {
        'epoch': now.timestamp(),
        'day': now.day,
        'month': now.month,
        'year': now.year,
        'hour': now.hour,
        'minute': now.minute,
        'str': str(now),
        'timeZone': self.firefly.location.geolocation.timezone
      },
      'location': {
        'lat': self.firefly.location.latitude,
        'lon': self.firefly.location.longitude,
        'address': self.firefly.location.address
      },
      'isDark': self.firefly.location.isDark,
      'mode': self.firefly.location.mode,
      'lastMode': self.firefly.location.lastMode,
      'statusMessages': self.firefly.location.status_messages,
      'modes': self.firefly.location.modes
    }
    # Alarm status is best-effort: security_and_monitoring may not be set up
    # yet, so a failure simply leaves return_data['security'] empty.
    # NOTE(review): bare except also hides programming errors - consider
    # narrowing to Exception.
    try:
      return_data['security']['status'] = self.firefly.security_and_monitoring.get_alarm_status()
    except:
      pass
    return return_data

  def refresh_user(self):
    ''' Refresh user token and auth.

    Re-authenticates against Firebase and re-opens the two streaming
    listeners (commands / commandReply). If the internet is down, retries
    in 1 minute; if sign-in fails, retries in 1 hour.

    Returns:
    '''
    logging.info('[FIREBASE] REFRESHING USER')
    if not internet_up():
      logging.error('[FIREBASE REFRESH] Internet seems to be down')
      scheduler.runInM(1, self.refresh_user, 'refresh_user_internet_down')
      return
    try:
      # Close stale streams first; they may not exist yet on first call,
      # hence the inner try/except.
      try:
        self.stream.close()
        self.commandReplyStream.close()
      except:
        pass
      self.user = self.auth.sign_in_with_email_and_password(self.email, self.password)
      self.id_token = self.user['idToken']
      self.stream = self.db.child('homeStatus').child(self.home_id).child('commands').stream(self.command_stream_handler, self.id_token)
      self.commandReplyStream = self.db.child('homeStatus').child(self.home_id).child('commandReply').stream(self.command_reply, self.id_token)
    except Exception as e:
      # Sign-in failed (bad credentials, transient outage, ...): retry later.
      logging.info("Firebase 266: %s" % str(e))
      scheduler.runInH(1, self.refresh_user, 'firebase_refresh_user')
      pass

  def security_update(self, security_status):
    """Push the current security status into the home status tree."""
    logging.info('Updating Firebase security status')
    self.set_home_status("securityStatus", security_status)

  def push(self, source, action, retry=True):
    """Push a device/location/security state change up to Firebase.

    Args:
      source: ff_id of the originating component, or one of the special
        SOURCE_TIME / SOURCE_LOCATION / FIREFLY_SECURITY_MONITORING markers.
      action: dict of changed state to publish.
      retry: when True, a failure triggers refresh_user() followed by
        exactly one retry (the retry call passes retry=False).
    """
    logging.info('[FIREBASE PUSH] Pushing Data: %s: %s' % (str(source), str(action)))
    try:
      # Update time events
      if source == SOURCE_TIME:
        self.update_location_status()
        return
      # Update location events
      if source == SOURCE_LOCATION:
        update_status_message = action == STATUS_MESSAGE_UPDATED
        self.update_location_status(update_status_message=update_status_message)
        self.send_event(source, action)
        return
      if source == FIREFLY_SECURITY_MONITORING:
        self.update_security_status(action)
        self.send_event(source, action)
        return
      if source not in self.firefly.components:
        logging.error('[FIREBASE PUSH] ERROR: Source not in firefly components.')
        return
      # Groups live under their own subtree.
      if self.firefly.components[source].type == 'GROUP':
        self.db.child("homeStatus").child(self.home_id).child('groupStatus').child(source).update(action, self.id_token)
        self.send_event(source, action)
        return
      self.update_device_status(source, action)
      # TODO(zpriddy): Remove this when new UI is done.
      # Internal bookkeeping payloads are not mirrored to the legacy
      # 'devices' subtree.
      if 'PARAMS' in action.keys():
        return
      if 'RAW_VALUES' in action.keys():
        return
      if 'SENSORS' in action.keys():
        return
      if 'ZWAVE_VALUES' in action.keys():
        return
      self.db.child("homeStatus").child(self.home_id).child('devices').child(source).update(action, self.id_token)
      self.send_event(source, action)
    except Exception as e:
      # Assume the auth token expired: refresh and retry once.
      logging.info('[FIREBASE PUSH] ERROR: %s' % str(e))
      self.refresh_user()
      if retry:
        self.push(source, action, False)

  def send_event(self, source, action):
    ''' add new event in the event log

    Args:
      source: ff_id of the device
      action: action to enter into event log

    Returns:
    '''
    # NOTE(review): this mutates the caller's dict in place - callers must
    # not rely on 'last_update'/'status_message' surviving this call.
    if 'last_update' in action.keys():
      action.pop('last_update')
    if 'status_message' in action.keys():
      action.pop('status_message')
    if not action:
      return
    now = self.firefly.location.now
    now_time = now.strftime("%B %d %Y %I:%M:%S %p")
    self.event_logger.event(source, action, now.timestamp())
    # Key is sortable by time: timestamp (dot stripped) + source id.
    event_key = '%s-%s' % (str(now.timestamp()).replace('.', ''), source)
    event_data = {
      'ff_id': source,
      'event': action,
      'timestamp': now.timestamp(),
      'time': now_time
    }
    # Once MAX_EVENTS have accumulated, rewrite the whole 'events' node from
    # the in-memory history, pruning anything not tracked locally.
    # NOTE(review): event_history itself does not appear to be trimmed here -
    # confirm it is bounded elsewhere.
    if self.events_since_clear > MAX_EVENTS:
      last_x_events = {}
      self.events_since_clear = 0
      for e in self.event_history:
        last_x_events[e.get('key')] = e.get('data')
      self.db.child("homeStatus").child(self.home_id).child('events').set(last_x_events, self.id_token)
    self.db.child("homeStatus").child(self.home_id).child('events').child(event_key).set(event_data, self.id_token)
    self.event_history.append({'key':event_key, 'data':event_data})
    self.events_since_clear += 1
    # Old push-based implementation kept for reference:
    '''
    self.db.child("homeStatus").child(self.home_id).child('events').push({
      'ff_id': source,
      'event': action,
      'timestamp': now.timestamp(),
      'time': now_time
    }, self.id_token)
    '''

  def push_notification(self, message, priority, retry=True):
    """Send a notification, refreshing auth and retrying once on failure."""
    try:
      self.send_notification(message, priority)
      if self.facebook:
        self.send_facebook_notification(message)
    except:
      self.refresh_user()
      if retry:
        self.push_notification(message, priority, False)

  def send_notification(self, message, priority):
    """Append a timestamped notification record to the home's queue."""
    now = self.firefly.location.now
    now_time = now.strftime("%B %d %Y %I:%M:%S %p")
    self.db.child("homeStatus").child(self.home_id).child('notifications').push({
      'message': message,
      'priority': priority,
      'timestamp': now.timestamp(),
      'time': now_time
    }, self.id_token)

  def send_facebook_notification(self, message, **kwargs):
    """Queue a Facebook notification message for this home."""
    logging.info("[FIREBASE FACEBOOK] SENDING NOTIFICATION")
    # NOTE(review): 'facebookNotifcations' (sic) is the actual database key -
    # the typo is load-bearing, do not "fix" it without migrating data.
    self.db.child("homeStatus").child(self.home_id).child("facebookNotifcations").push(message, self.id_token)

  def get_api_id(self, **kwargs):
    """Register a device under apiDevices and stream back its API key.

    Expects kwargs 'api_ff_id' (device id) and 'callback' (called as
    callback(firebase_api_key=...) once the key appears). Returns False when
    either is missing; otherwise returns None and leaves a one-shot stream
    open until the key arrives.
    """
    ff_id = kwargs.get('api_ff_id')
    callback = kwargs.get('callback')
    my_stream = None
    if ff_id is None or callback is None:
      return False

    def stream_api_key(message):
      # Stream handler: fires for every update on .../apiDevices/<ff_id>/apiKey.
      data = message.get('data')
      if data is None:
        return
      api_key = data
      if api_key is None:
        return
      callback(firebase_api_key=api_key)
      # Key delivered - tear down the stream (best-effort).
      try:
        my_stream.close()
      except:
        pass

    now = self.firefly.location.now.timestamp()
    self.db.child("homeStatus").child(self.home_id).child("apiDevices").update({
      ff_id: {
        'added': now
      }
    }, self.id_token)
    my_stream = self.db.child("homeStatus").child(self.home_id).child("apiDevices").child(ff_id).child('apiKey').stream(stream_api_key, self.id_token)

  def upload_log(self, filename, **kwargs):
    """Upload a local event-log file to Firebase storage under this home."""
    storage = self.firebase.storage()
    remote_file_name = '%s_event_log_%s.json' % (self.home_id, self.firefly.location.now.timestamp())
    storage.child(self.home_id).child(remote_file_name).put(filename, self.id_token)


def scrub(x):
  """Deep-copy x, recursively replacing every None with ''.

  Used to sanitize payloads before sending them to Firebase.
  """
  ret = copy.deepcopy(x)
  # Handle dictionaries, lists & tuples: scrub all contained values.
  if isinstance(x, dict):
    for k, v in ret.items():
      ret[k] = scrub(v)
  if isinstance(x, (list, tuple)):
    # Tuples are unexpected here - surface them for debugging.
    if isinstance(x, (tuple)):
      logging.notify(str(x))
    for k, v in enumerate(ret):
      ret[k] = scrub(v)
  # Handle None
  if x is None:
    ret = ''
  # Finished scrubbing
  return ret


# Characters Firebase forbids in node keys.
FIREBASE_INVALID_CHARS = ['/', '\\', '$', '#']


def has_invalid_char(string_to_check):
  """Return True if the string contains any Firebase-invalid key character."""
  for c in FIREBASE_INVALID_CHARS:
    if c in string_to_check:
      return True
  return False


def check_all_keys(firebase_dict):
  """Recursively log (but do not fix) any dict keys Firebase would reject."""
  for key in firebase_dict:
    if has_invalid_char(key):
      logging.critical('[FIREBASE CHECK ALL KEYS] ****************** BAD KEY: %s' % key)
    if type(firebase_dict[key]) is dict:
      check_all_keys(firebase_dict[key])
Firefly-Automation/Firefly
Firefly/services/firebase/firebase.py
Python
apache-2.0
29,353
[ "Firefly" ]
f0d7a92c3c9d25d78f5de65a4e51700b4859d1becc0ad0e40b7e01f4a0813f19
# -*- coding: utf-8 -*- """ Gaussians model: Inferring a mean and standard deviation. Chapter 4.1, Bayesian Cognitive Modeling. Created Aug/2015 by Johannes Keyser <j.keyser@donders.ru.nl> """ import pymc3 as pm import numpy as np import pandas as pd x = np.array([1.1, 1.9, 2.3, 1.8]) model = pm.Model() with model: # priors mu = pm.Normal('mu', mu=0, sd=100) # or tau=0.001 == 1/100**2 sigma = pm.Uniform('sigma', lower=.1, upper=10) # data come from a Gaussian x = pm.Normal('x', mu=mu, sd=sigma, observed=x) # instantiate sampler stepFunc = pm.Metropolis() # or try pm.NUTS() # draw posterior samples (in 4 parallel running chains) Nsample = 1000 Nchains = 4 traces = pm.sample(Nsample, step=stepFunc, njobs=Nchains) plotVars = ('mu','sigma') axs = pm.traceplot(traces, vars=plotVars, combined=False) # plot joint posterior samples tstr = 'Joint posterior samples' post = np.vstack([traces['mu'], traces['sigma']]) post = post.transpose() df = pd.DataFrame(post, columns=plotVars) ax = df.plot(kind='scatter', x=plotVars[0], y=plotVars[1], alpha=.1, title=tstr)
JoKeyser/BCMinPyMC3
ch4-1_Gaussian.py
Python
gpl-3.0
1,114
[ "Gaussian" ]
36838bd0027a7816f48709c4410a623cbf86fa4904730201b9808fe4fffd6ad7
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
This modules allows to expose ESPREsSo's coordinates and particle attributes
to MDAnalysis without need to save information to files.

The main class is Stream(), which is used to initialize the stream of
data to MDAnalysis' readers. These are the topology reader
ESPParser(TopologyReaderBase) and the coordinates reader
ESPReader(SingleFrameReaderBase).

A minimal working example is the following:

>>> # imports
>>> import espressomd
>>> from espressomd import MDA_ESP
>>> import MDAnalysis as mda
>>> # system setup
>>> system = espressomd.System()
>>> system.time_step = 1.
>>> system.cell_system.skin = 1.
>>> system.box_l = [10.,10.,10.]
>>> system.part.add(id=0,pos=[1.,2.,3.])
>>> # set up the stream
>>> eos = MDA_ESP.Stream(system)
>>> # feed Universe with a topology and with coordinates
>>> u = mda.Universe(eos.topology,eos.trajectory)
>>> print(u)
<Universe with 1 atoms>

"""
try:
    import cStringIO as StringIO
except ImportError:
    from io import StringIO

import numpy as np

import MDAnalysis
from distutils.version import LooseVersion
from MDAnalysis.lib import util
# FIX: triclinic_vectors was missing from this import although it is called
# in Timestep.dimensions' setter below (using the setter raised NameError).
from MDAnalysis.coordinates.core import triclinic_box, triclinic_vectors
from MDAnalysis.lib.util import NamedStream
from MDAnalysis.topology.base import TopologyReaderBase
from MDAnalysis.coordinates import base
from MDAnalysis.coordinates.base import SingleFrameReaderBase
from MDAnalysis.core.topology import Topology
from MDAnalysis.core.topologyattrs import (
    Atomnames, Atomids, Atomtypes, Masses,
    Resids, Resnums, Segids, Resnames, AltLocs,
    ICodes, Occupancies, Tempfactors, Charges
)


class Stream(object):

    def __init__(self, system):
        """
        Create an object that provides a MDAnalysis topology and a
        coordinate reader

        Parameters
        ----------
        system : an instance of the espressomd.System() class

        Returns
        -------
        a Stream class

        Properties
        ----------
        trajectory:
           returns a named pipe with the information about the current frame
        topology :
           a topology for MDAnalysis

        >>> eos = MDA_ESP.Stream(system)
        >>> u = mda.Universe(eos.topology,eos.trajectory)

        """
        self.topology = ESPParser(None, espresso=system).parse()
        self.system = system

    @property
    def trajectory(self):
        """
        Particles' coordinates at the current time

        Returns
        -------
        A named stream in the format that can be parsed by ESPReader()

        """
        # Serialized single-frame format consumed by ESPReader:
        # time, number of particles, box edges, then one line per particle
        # for positions, velocities and forces (in that order).
        # time
        _xyz = str(self.system.time) + '\n'
        # number of particles
        _xyz += str(len(self.system.part)) + '\n'
        # box edges
        _xyz += str(self.system.box_l) + '\n'
        # configuration
        for _p in self.system.part:
            _xyz += str(_p.pos) + '\n'
        for _p in self.system.part:
            _xyz += str(_p.v) + '\n'
        for _p in self.system.part:
            _xyz += str(_p.f) + '\n'
        # The ".ESP" suffix routes the stream to ESPReader via its
        # `format` attribute.
        return NamedStream(StringIO(_xyz), "__.ESP")


class ESPParser(TopologyReaderBase):
    """
    An MDAnalysis reader of espresso's topology
    """
    format = 'ESP'

    def __init__(self, filename, **kwargs):
        self.kwargs = kwargs

    def parse(self):
        """
        Access ESPResSo data and return the topology object

        Returns
        -------
        an MDAnalysis Topology object

        """
        espresso = self.kwargs['espresso']

        names = []
        atomtypes = []
        masses = []
        charges = []

        # Particle type N maps to name "AN" and atom type "TN".
        for p in espresso.part:
            names.append("A" + repr(p.type))
            atomtypes.append("T" + repr(p.type))
            masses.append(p.mass)
            charges.append(p.q)

        natoms = len(espresso.part)

        # All particles are collapsed into a single residue/segment.
        attrs = [Atomnames(np.array(names, dtype=object)),
                 Atomids(np.arange(natoms) + 1),
                 Atomtypes(np.array(atomtypes, dtype=object)),
                 Masses(masses),
                 Resids(np.array([1])),
                 Resnums(np.array([1])),
                 Segids(np.array(['System'], dtype=object)),
                 AltLocs(np.array([' '] * natoms, dtype=object)),
                 Resnames(np.array(['R'], dtype=object)),
                 Occupancies(np.zeros(natoms)),
                 Tempfactors(np.zeros(natoms)),
                 ICodes(np.array([' '], dtype=object)),
                 Charges(np.array(charges)),
                 ]

        top = Topology(natoms, 1, 1, attrs=attrs)

        return top


class Timestep(base.Timestep):
    # Positions of each box-vector component within the flat 9-element
    # _unitcell array.
    _ts_order_x = [0, 3, 4]
    _ts_order_y = [5, 1, 6]
    _ts_order_z = [7, 8, 2]

    def _init_unitcell(self):
        return np.zeros(9, dtype=np.float32)

    @property
    def dimensions(self):
        # This information now stored as _ts_order_x/y/z to keep DRY
        x = self._unitcell[self._ts_order_x]
        y = self._unitcell[self._ts_order_y]
        z = self._unitcell[self._ts_order_z]
        # this ordering is correct! (checked it, OB)
        return triclinic_box(x, y, z)

    @dimensions.setter
    def dimensions(self, box):
        x, y, z = triclinic_vectors(box)
        np.put(self._unitcell, self._ts_order_x, x)
        np.put(self._unitcell, self._ts_order_y, y)
        # FIX: the z vector was never stored, leaving the third box vector
        # zeroed after a round-trip through this setter.
        np.put(self._unitcell, self._ts_order_z, z)


class ESPReader(SingleFrameReaderBase):
    """
    An MDAnalysis single frame reader for the stream provided by Stream()
    """
    format = 'ESP'
    units = {'time': None, 'length': 'nm', 'velocity': 'nm/ps'}
    _Timestep = Timestep

    def _read_first_frame(self):
        # Parse the serialized frame produced by Stream.trajectory. The
        # enumerate offset of -3 makes the three header lines (time, natoms,
        # box) occupy pos -3..-1 and the particle records start at pos 0.
        with util.openany(self.filename, 'rt') as espfile:
            n_atoms = 1
            for pos, line in enumerate(espfile, start=-3):
                if (pos == -3):
                    time = float(line[1:-1])
                elif(pos == -2):
                    n_atoms = int(line)
                    self.n_atoms = n_atoms
                    positions = np.zeros(
                        self.n_atoms * 3,
                        dtype=np.float32).reshape(self.n_atoms, 3)
                    velocities = np.zeros(
                        self.n_atoms * 3,
                        dtype=np.float32).reshape(self.n_atoms, 3)
                    forces = np.zeros(
                        self.n_atoms * 3,
                        dtype=np.float32).reshape(self.n_atoms, 3)
                    self.ts = ts = self._Timestep(
                        self.n_atoms, **self._ts_kwargs)
                    self.ts.time = time
                elif(pos == -1):
                    # Box line: strip the surrounding brackets and parse the
                    # three edge lengths.
                    self.ts._unitcell[:3] = np.array(
                        list(map(float, line[1:-2].split())))
                elif(pos < n_atoms):
                    positions[pos] = np.array(
                        list(map(float, line[1:-2].split())))
                elif(pos < 2 * n_atoms):
                    velocities[pos - n_atoms] = np.array(
                        list(map(float, line[1:-2].split())))
                else:
                    forces[pos - 2 * n_atoms] = np.array(
                        list(map(float, line[1:-2].split())))
            ts.positions = np.copy(positions)
            ts.velocities = np.copy(velocities)
            ts.forces = np.copy(forces)
hmenke/espresso
src/python/espressomd/MDA_ESP/__init__.py
Python
gpl-3.0
8,024
[ "ESPResSo", "MDAnalysis" ]
6f1b87830da875ef9887519adad49e656e7b0dca3b81e7db57b48920b2ca43bc
# -*- coding: utf-8 -*- #MIT License #Copyright (c) 2017 Marton Kelemen #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. 
from ....application.utils import plotgen from ....application.utils import geno_qc from ....application.logic.knet import knet_main from ....io import knet_IO import gc import numpy as np from numpy.linalg import norm from scipy import stats from pathlib import Path import random import os lastLayerSize_MAX = int(1000) # int(4096 /2) # delta = (Ve/Vg) # delta = (1-h2) / h2 #args, args.epochs, args.learnRate, args.momentum, args.evalFreq, args.savFreq, args.predictPheno, args.loadWeights, args.saveWeights, args.randomSeed, args.hidCount, args.hidl2, args.hidAct # V(G) 0.168545 0.004763 #V(e) 0.006826 0.002168 def addActivation(myNet, hidAct): if hidAct == 1 : H_Act = knet_main.knSigmoid( myNet) elif hidAct == 2 : H_Act = knet_main.knRELU( myNet) elif hidAct == 3 : print("no activatioN") elif hidAct == 5 : H_Act = knet_main.knLeakyRELU( myNet) else : H_Act = knet_main.knSoftplus( myNet) def getNetworkStructure(myNet) : layernum = 0 for layerIndex in range(0,len(myNet.layers)) : layer = myNet.layers[layerIndex] if type(layer) == knet_main.knnLayer: # for non input types, we have if layer.Weights_W is not None : layernum += 1 print("layer " + str(layernum) + " has weight matrix shaped: " + str(layer.Weights_W.shape)) def runKnet(args) : hLayerCount = args.hidCount hiddenShrinkage = args.hidl2 # default QC settings used for all non AMBLUP versions _minObserved = 0.95 _minMAF = 0.01 _minVariance = 0.02 # load plink binary / phenotypes want to load them here, so that we can release the memory once the raw data is no longer used cc = True if args.cc == 0 : cc = False recodecc = True if args.recodecc == 0 : recodecc = False genotypeData = knet_IO.loadPLINK(args.knet, loadPhenos = False) M = genotypeData["M"] irsIds = genotypeData["rsid"] IDs = genotypeData["IDs"] indicesKept = np.asarray( range(M.shape[1]) ) del genotypeData ; gc.collect() # dont need this y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc) y = stats.zscore(y) # zscore it 
so that Beta -> h2 computations work # if we have a validation set M_validation = None y_validation = None if args.validSet : genotypeData = knet_IO.loadPLINK(args.validSet, loadPhenos = False, replaceMissing = True) # want to replace -1 with 0s, as we otherwise would have -1s, as later we just delete indices that failed QC for the training set, but won't care for individual missing datas M_validation = genotypeData["M"] IDs_validation = genotypeData["IDs"] print("Loaded number of people for validatin: ", len(M_validation), flush=True ) del genotypeData ; gc.collect() # dont need this if args.validPhen : y_validation = knet_IO.loadPLINKPheno(args.validPhen, caseControl = cc, recodeCaseControl = recodecc) y_validation = stats.zscore(y_validation) # zscore it so that Beta -> h2 computations work if args.inference == 0 : # 1. standardise data if args.qc == 1 : qc_data = geno_qc.genoQC_all(M, rsIds = irsIds, minObserved = _minObserved, minMAF = _minMAF, minVariance = _minVariance) # we MUST perform QC with the EXACT SAME settings as the 'region scanner' otherwise the region coordinates will be mismatched #M = qc_data["X"] rsIds_qc = qc_data["rsIds"] # save away the surviving SNP list that we have used indicesToRemove = qc_data["indicesToRemove"] indicesKept = qc_data["indicesKept"] irsIds = rsIds_qc.tolist() del qc_data; gc.collect() # overwrite qc_data = geno_qc.removeList(M, indicesToRemove) M = qc_data["X"] del qc_data; gc.collect() # overwrite else : print("Skipping internal QC", flush=True) M, mns, sstd = geno_qc.standardise_Genotypes(M) ; gc.collect() print("After standardising, training data in MBs is: ",geno_qc.getSizeInMBs(M) ) else : print("Inference data QC", flush=True) if args.snpIndices is not None : indicesToKeep = knet_IO.loadIndices(args.snpIndices) M = M[:,indicesToKeep] mns = knet_IO.loadVectorFromDisk( args.mns , 'float32') # these are always float32 even in 64 runs sstd = knet_IO.loadVectorFromDisk( args.sstd , 'float32') snpIDs = 
knet_IO.loadsnpIDs(args.snpIDs) M = M.astype('float32') M -= mns M /= sstd # load final list of RSids # load mean /SDs #M = geno_qc.standardise_Genotypes(M) ; gc.collect() #print("After standardising, training data in MBs is: ",geno_qc.getSizeInMBs(M) ) # get Zscores: have to standardise ONLY over the training, and not the training+ validation together: https://blog.slavv.com/37-reasons-why-your-neural-network-is-not-working-4020854bd607 # will have to implement this for genetic data if M_validation is not None : if args.qc == 1 : # depending on if we are in inference mode, make sure we have the same set of SNPs if args.inference == 0 : M_validation = np.delete(M_validation, indicesToRemove, axis=1) else : M_validation = M_validation[:,indicesToKeep] #qc_data = geno_qc.removeList(M_validation, indicesToRemove) M_validation = M_validation.astype('float32') M_validation -= mns M_validation /= sstd indices_validation = np.asarray( range(len(M_validation)) ) # is used for storting print("After standardising, validation data in MBs is: ",geno_qc.getSizeInMBs(M_validation) ) # Pre-process data: evalTrainResults = True BNEnabled = int(args.bnorm) == 1 decay_Enabled = int(args.lr_decay) == 1 # Shuffle data before producing the minibatches to avoid having all-case or all-control minibatches np.random.seed(args.randomSeed) random.seed(args.randomSeed) indices = np.asarray( range(len(M)) ) # is used for storting random.shuffle(indices) M = M[indices] y = y[indices] IDs[0] = np.array(IDs[0]) IDs[1] = np.array(IDs[1]) IDs[0] = IDs[0][indices] IDs[1] = IDs[1][indices] # reshape data to be the right dimensions for Convolutions if args.convLayers > 0 : M = M.reshape(M.shape[0], 1 , 1, M.shape[1]) if M_validation is not None : M_validation = M_validation.reshape(M_validation.shape[0], 1 , 1, M_validation.shape[1]) # 2. 
create minibatch list numIndividuals = M.shape[0] numSNPs = M.shape[1] # numSNPs = bed.get_nb_markers(), as we may have removed SNPs, we want to know how many are left len_M = len(M) len_M_validation = 0 train_GWAS = list() train_y = list() minibatch_size = args.batch_size #M.shape[0] # 64 if args.batch_size == 0 : minibatch_size = len(M) num_batches = len(M) // minibatch_size # scale the delta by minibatch_size, if we dont have minibatches ratio = float(minibatch_size) / numIndividuals # this is 1 if there are no minibatches print("orig L2 Regularizer : " + str(hiddenShrinkage) + " minibatches scaled to " + str(hiddenShrinkage * ratio) ) hiddenShrinkage *= ratio start = 0 end = minibatch_size # for i in range(num_batches) : # train_GWAS.append(M[start:end] ) # train_y.append(y[start:end]) # print("adding batch " + str(i) + " , start/end: " + str(start) + "/" + str(end) ) # start = end # end += minibatch_size y_batched = y.copy() # do this in a more RAM efficient way: keep deleting the bits from the original matrix to free up space as we go along otherwise this step would double the RAM requirements temporarily for i in range(num_batches) : train_GWAS.append(M[0:minibatch_size] ) M = M[minibatch_size:len(M)] train_y.append(y_batched[0:minibatch_size]) y_batched = y_batched[minibatch_size:len(y_batched)] print("adding batch " + str(i) + ", minibatch size: " + str(minibatch_size) + " / num left in pool: " + str(len(M)) ) gc.collect() print("train_GWAS[0].shape: " + str( train_GWAS[0].shape) + " // train_y.shape: " + str( train_y[0].shape) ) del M; gc.collect() # free up memory if M_validation is not None : len_M_validation = len(M_validation) if args.batch_size == 0 : minibatch_size = len(M_validation) test_GWAS = list() test_y = list() evalResults = True num_batches = len(M_validation) // minibatch_size print("len_M_validation is: " + str(len_M_validation) + ", minibatch size: " + str(minibatch_size) + " args.batch_size: " + str(args.batch_size) + " num_batches is: 
" + str(num_batches)) start = 0 end = minibatch_size for i in range(num_batches) : test_GWAS.append(M_validation[start:end] ) test_y.append(y_validation[start:end]) print("adding batch " + str(i) + " , start/end: " + str(start) + "/" + str(end) ) start = end end += minibatch_size # del M_validation; gc.collect() # free up memory, cant do this as we need this for the PRS calculation.... else : test_GWAS = None test_y = None evalResults = False # 3. initialise network params floatPrecision = "float" +str(args.float) print("floatPrecision is: " + floatPrecision) knet_main.setDataType(floatPrecision) myNet = knet_main.knn(optimizer = args.optimizer) if args.gpu == 1 : print("attempting to init GPU", flush=True) knet_main.initGPU() print("GPU successfully set", flush=True) knet_main.set_seed(args.randomSeed) if args.orig == 1 : print("setting KNeT optimizer 0 to original version", flush=True) knet_main.KNET_ORIG = True Input = knet_main.knnLayer( myNet,np.array([-1]), knet_main.LAYER_SUBTYPE_INPUT) # if conv was enabled we then do NOT regularize stuff at the first FC layer as we only want to regularize by h2 once hiddenREGULARIRIZER = "REGULARIZER_RIDGE" shrinkage = hiddenShrinkage if args.convLayers > 0 : lastOutput = train_GWAS[0].shape[-1] # the input to the first layer is the last element of the shape array, eg: 33380 print("Adding "+str(args.convLayers)+" conv layers, with initial input dimension: " + str(lastOutput), flush=True) # first conv layer has special logic, we must make it so that size=stride, to avoid the massive space expansion # first, find the smallest size/stride that will result in a whole number output size: # for i in range(4,21) : # filter sizes of 4 to 20 are considered # trialOutput = lastOutput # currentStride = filter_size = i # trialOutput = (trialOutput - filter_size +2) / currentStride + 1 # print("trialOutput : " + str(trialOutput) + " / filter_size: " + str(filter_size) + " / currentStride: " + str(currentStride) ) # if trialOutput % 1 
== 0 : # print("first Conv layer filter/stride will be: " + str(filter_size), flush=True) # break currentNumFilters= args.convFilters currentStride = 3 filter_size = 5 # as it turns out it is not actually a problem if the conv outputs something that isn't an integer, so we just need to downsample it Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = hiddenREGULARIRIZER, shrinkageParam = hiddenShrinkage, p_dropout = args.dropout, n_filters = currentNumFilters, h_filter=1, w_filter=filter_size, padding=1, stride=currentStride, oneD = True) if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) addActivation(myNet,args.hidAct) lastOutput = (lastOutput - filter_size +2) / currentStride + 1 lastOutput = int(lastOutput) # as these can only be integers hiddenREGULARIRIZER = knet_main.REGULARIZER_NONE # only add regularizer for first layer, subsequent layers will always have none shrinkage = 0.0 currentStride = 1 pool_size = 2 for i in range(1, args.convLayers +1) : # decide on filter size, depending on input, Conv layers must always produce even outputs so that maxpool can half them filter_size = 3 if lastOutput % 2 != 0 : filter_size = 4 # if the current output is not even, then we have to use a filter size of 4, otherwise we get fractions after the maxpool operation ## currentNumFilters = (i+1) * args.convFilters currentNumFilters = currentNumFilters // 2 Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = hiddenREGULARIRIZER, shrinkageParam = shrinkage, p_dropout = args.dropout, n_filters = currentNumFilters, h_filter=1, w_filter=filter_size, padding=1, stride=currentStride, oneD = True) if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) addActivation(myNet,args.hidAct) lastOutput = (lastOutput - filter_size +2) / currentStride + 1 lastOutput = int(lastOutput) # as these can only 
be integers MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True) lastOutput = (lastOutput - pool_size) / pool_size + 1 # compute what dimensions the conv+maxpool operations are going to leave for the next layer # Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_RIDGE", shrinkageParam = hiddenShrinkage, p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=8, padding=1, stride=4, oneD = True) # if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) # addActivation(myNet,args.hidAct) # MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True) # # Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_NONE", shrinkageParam = 0., p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=5, padding=1, stride=2, oneD = True) # will have to be 6 for next one ( 8 for last one) # if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) # addActivation(myNet,args.hidAct) # MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True) # Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_NONE", shrinkageParam = 0., p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=5, padding=1, stride=1, oneD = True) # if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) # addActivation(myNet,args.hidAct) # MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True) # # Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_NONE", shrinkageParam = 0., p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=5, padding=1, stride=1, oneD = True) # if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) # addActivation(myNet,args.hidAct) # 
MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True) # Flatten_Layer = knet_main.knnFlatten(myNet) lastLayerSize = args.firstLayerSize #lastLayerSize_MAX for i in range(1,hLayerCount+1) : # iterate 1 based, otherwise we will get a reduction after the first layer, no matter the widthReductionRate, as 0 is divisible by anything if i > 1 or args.convLayers > 0 : # only add regularizer for first layer, subsequent layers will always have none hiddenREGULARIRIZER = knet_main.REGULARIZER_NONE shrinkage = 0.0 #if i == (hLayerCount-1) : lastWidth = 2 # enforce so that the last widht is always 2, ie 1 neuron makes it MORE like the other LESS likely H_Layer = knet_main.knnLayer(myNet, [lastLayerSize], knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = hiddenREGULARIRIZER, shrinkageParam = shrinkage, p_dropout = args.dropout) if BNEnabled : Bnorm = knet_main.knnBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN) addActivation(myNet,args.hidAct) print("added layer at depth: " + str(i) + " with width: " + str(lastLayerSize) + " / shrinkage: " + str(shrinkage)) # control the 'fatness' of the network: we reduce the width at a given rate: if this is 1, then at every subsequent layer, if its 2, then every 2nd layer etc if i % args.widthReductionRate == 0 : lastLayerSize = lastLayerSize // 2 if lastLayerSize < 2 : break # if Output = knet_main.knnLayer( myNet,np.array([ y.reshape(y.shape[0],-1).shape[1] ]), knet_main.LAYER_SUBTYPE_OUTPUT, regularizer = "REGULARIZER_NONE", shrinkageParam = 0.0) if len( y.shape) > 1 : Out_Act = knet_main.knSoftmax( myNet) #knet_main.checkConvOutput(myNet, [1,5194 ]) if args.convLayers > 0 : knet_main.checkConvOutput(myNet, [*train_GWAS[0][0][0].shape]) knet_main.getNetworkMemUsage(myNet,train_GWAS[0].shape) # of RAM if args.inference == 0 : print("Analysis Run", flush = True) results = myNet.learn(train_GWAS, train_y, test_GWAS, test_y, eval_test=evalResults,eval_train=evalTrainResults, num_epochs=args.epochs, eta=args.learnRate, eval_freq = 
args.evalFreq, friction = args.momentum, decayEnabled = decay_Enabled) getNetworkStructure(myNet) #writeKNetParamsToDisk(myNet, "C:/0Datasets/NNs/knet_genetic_fc/knet") if args.saveWeights is not None : writeKNetParamsToDisk(myNet, args.saveWeights, knet_main.NETWORK_DATATYPE) # write epoch results out results_its = results["results"]#["results"] os.makedirs(os.path.dirname(args.out), exist_ok=True) #write training data means / stds to disk so that we could use those for inference runs later print("writing means/stds to disk with datatype: " + str(sstd.dtype)) print("sstd shape is: " + str(sstd.shape) + " / mns shape: " + str(mns.shape)) knet_IO.writeVectorToDisk( args.out + "data_mns" , mns, mns.dtype) knet_IO.writeVectorToDisk( args.out + "data_sstd" , sstd, sstd.dtype) fileName = args.out + "nn_results.txt" with open(fileName, "w") as file: line = "epochs" if "train_accuracy" in results_its: line = line + "\t" + "train_accuracy" if "test_accuracy" in results_its: line = line + "\t" + "test_accuracy" file.write(line + "\n") for i in range( len(results_its["epochs"]) ): line = str(results_its["epochs"][i]) if "train_accuracy" in results_its: line = line + "\t" + str(results_its["train_accuracy"][i]) if "test_accuracy" in results_its: line = line + "\t" + str(results_its["test_accuracy"][i]) file.write(line + "\n") # generate plot of the results if len(results_its["epochs"]) > 0 : plotgen.exportNNPlot(results_its, args.out + "nnplot") # write out the SNPs that were used for the analysis fileName = args.out + "nn_SNPs.txt" with open(fileName, "w") as file: for i in range( len(irsIds) ): file.write(irsIds[i] + "\n") # write out the indices of the original dataset's coordinates for convenience if indicesKept is not None: # in case we skipped QC fileName = args.out + "nn_SNPs_indices.txt" with open(fileName, "w") as file: for i in range( len(indicesKept) ): file.write( str(indicesKept[i]) + "\n") if len_M_validation > 0 : producePRS(myNet,M_validation, test_GWAS, 
IDs_validation, len_M_validation , args.out + "yhat.txt", args.out + "FIDs.txt", y_validation, args.out + "KNET_PRS") # # write final predictions out # yhats = list() # totalSofar= 0 # for i in range(len(test_GWAS)) : # loop through all minbatches # totalSofar += len(test_GWAS[i]) # yhats.append( myNet.forward_propagate(test_GWAS[i],False, forceCast_toCPU = True) ) # # # if totalSofar < len_M_validation : # print("minibatches did not cover all training samples, so we create last batch out of the remainders") # lastBatch_X = M_validation[totalSofar:len_M_validation] # yhats.append( myNet.forward_propagate(lastBatch_X,False, forceCast_toCPU = True) ) # # # #yhats = list() # #yhats.append( np.array([ [0],[1],[2] ])) # #yhats.append( np.array([ [3],[4],[5] ])) # #yhats.append( np.array([ [6],[7],[8] ])) # yhat_all = np.concatenate(yhats) # print("after merging, we have yhat predictions for : " + str(len(yhat_all)) + " samples", flush=True) # # print("yhat_all.shape: " + str(yhat_all.shape) + " // indices_validation.shape: " + str(indices_validation.shape) + " // indices.shape: " + str(indices.shape) ) # # # fileName = args.out + "yhat.txt" # with open(fileName, "w") as file: # file.write("Profile" + "\n") # # # for i in range(yhat_all.shape[0]) : # line = str(yhat_all[i][0] ) # for j in range(1, len(yhat_all[i]) ): # line = line + "\t" + str(yhat_all[i][j] ) # # file.write( line + "\n") # file.write( ( str(yhat[i])[2:-1] ).replace(" ", " ").replace(" ", "\t") + "\n") # # # also write out the FID / IIDs in the same order, just as a sanity check (compare this against the .fam files) # # fileName = args.out + "FIDs.txt" # with open(fileName, "w") as file: # file.write("FID" + "\t" + "IID" + "\n") # # for i in range( len(IDs_validation[0]) ) : # line = IDs_validation[0][i] + "\t" + IDs_validation[1][i] # # file.write( line + "\n") else : print("Inference Run", flush = True) loadKNetParams(myNet, args.loadWeights, knet_main.NETWORK_DATATYPE) if args.garson == 1: 
print("producing importance scores via the garson algorithm") NNinference = myNet.dream_Garson() else : print("producing importance scores via deep dreaming") os.makedirs(os.path.dirname(args.out), exist_ok=True) # forward propagate with the 1st sample of the training set yhat = myNet.forward_propagate(train_GWAS[0], train = False, saveInput = False, forceCast_toCPU = True) suppressPrint_orig = myNet.suppressPrint myNet.suppressPrint = True StartImage = None #StartImage = np.random.normal( size=(1,X_test.shape[1])) print("producing inference with number of iterations: " + str(args.dreamit), flush=True) dream = myNet.dream(0, 100,StartImage,args.dreamit , mFilterSize = 0, blur = 0.0, l2decay = 0.0, small_norm_percentile = 0,lr = 1.5,normalize = False, small_val_percentile = 0) NNinference = dream[0].ravel() NNinference[np.isnan(NNinference)]=0.0 myNet.suppressPrint = suppressPrint_orig # Here this would need to be more constrained: # both LD and MAF need to be taken into account knet_IO.writeSNPeffects(args.out + "dream",snpIDs, NNinference) # the validation here will refer to the TEST set if len_M_validation > 0 : producePRS(myNet,M_validation, test_GWAS, IDs_validation, len_M_validation , args.out + "yhat.txt", args.out + "FIDs.txt", y_validation, args.out + "KNET_PRS") def producePRS(myNet,origData, miniBatches, IndiIDs, len_total , outLoc_yhat, outLoc_FIDs, ytrue, outLoc_PRS) : # write final predictions out yhats = list() totalSofar= 0 for i in range(len(miniBatches)) : # loop through all minbatches totalSofar += len(miniBatches[i]) yhats.append( myNet.forward_propagate(miniBatches[i],False, forceCast_toCPU = True) ) if totalSofar < len_total : print("minibatches did not cover all training samples, so we create last batch out of the remainders") lastBatch_X = origData[totalSofar:len_total] yhats.append( myNet.forward_propagate(lastBatch_X,False, forceCast_toCPU = True) ) yhat_all = np.concatenate(yhats) print("after merging, we have yhat predictions for : " + 
str(len(yhat_all)) + " samples", flush=True) fileName = outLoc_yhat with open(fileName, "w") as file: file.write("Profile" + "\n") for i in range(yhat_all.shape[0]) : line = str(yhat_all[i][0] ) for j in range(1, len(yhat_all[i]) ): line = line + "\t" + str(yhat_all[i][j] ) file.write( line + "\n") # file.write( ( str(yhat[i])[2:-1] ).replace(" ", " ").replace(" ", "\t") + "\n") # also write out the FID / IIDs in the same order, just as a sanity check (compare this against the .fam files) fileName = outLoc_FIDs with open(fileName, "w") as file: file.write("FID" + "\t" + "IID" + "\n") for i in range( len(IndiIDs[0]) ) : line = IndiIDs[0][i] + "\t" + IndiIDs[1][i] file.write( line + "\n") # write out the final r^2 yhat_all += knet_main.EPSILON # for numerical stability rSQ = np.corrcoef( ytrue, yhat_all, rowvar=0)[1,0]**2 with open(outLoc_PRS, "w") as file: file.write(str(rSQ) ) def writeKNetParamsToDisk(myNet, targetDir, datatype = 'float32') : os.makedirs(os.path.dirname(targetDir), exist_ok=True) for i in range( len(myNet.layers) ) : if myNet.layers[i] : if isinstance(myNet.layers[i],knet_main.knnLayer) or isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if its a layer with trainable params if myNet.layers[i].subtype != knet_main.LAYER_SUBTYPE_INPUT : # if its not an input layer # it has at least 6 trainable params: Weights, Momentum, Past_Grads, (2x for bias too) print("writing params for layer " + type(myNet.layers[i]).__name__ ) # MUST cast them to CPU before attempting to write out, otherwise GPU will hang there knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_w" , knet_main.castOutputToCPU(myNet.layers[i].Weights_W), datatype) knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_wb" , knet_main.castOutputToCPU(myNet.layers[i].Weights_bias), datatype) knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_m" , knet_main.castOutputToCPU(myNet.layers[i].Momentum), datatype) 
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_mb" , knet_main.castOutputToCPU(myNet.layers[i].Bias_Momentum), datatype) knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_p" , knet_main.castOutputToCPU(myNet.layers[i].Past_Grads), datatype) knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_pb" , knet_main.castOutputToCPU(myNet.layers[i].Past_Grads_bias), datatype) if isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if it is a batchnorm type, then it will have another 2 trainable params knet_IO.writeVectorToDisk( targetDir + "_" + str(i)+ "_rv" , knet_main.castOutputToCPU(myNet.layers[i].running_var), datatype) knet_IO.writeVectorToDisk( targetDir + "_" + str(i)+ "_rm" , knet_main.castOutputToCPU(myNet.layers[i].running_mean),datatype) def loadKNetParams(myNet, targetDir, datatype = 'float32') : for i in range( len(myNet.layers) ) : if myNet.layers[i] : if isinstance(myNet.layers[i],knet_main.knnLayer) or isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if its a layer with trainable params if myNet.layers[i].subtype != knet_main.LAYER_SUBTYPE_INPUT : # if its not an input layer # it has at least 6 trainable params: Weights, Momentum, Past_Grads, (2x for bias too) print("loading params for layer " + type(myNet.layers[i]).__name__ ) myNet.layers[i].Weights_W = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_w" ,datatype) myNet.layers[i].Weights_bias = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_wb",datatype ) myNet.layers[i].Momentum = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_m",datatype ) myNet.layers[i].Bias_Momentum = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_mb",datatype ) myNet.layers[i].Past_Grads = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_p" ,datatype) myNet.layers[i].Past_Grads_bias = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_pb" 
,datatype) if isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if it is a batchnorm type, then it will have another 2 trainable params myNet.layers[i].running_var = knet_IO.loadVectorFromDisk( targetDir + "_" + str(i)+ "_rv" ,datatype) myNet.layers[i].running_mean = knet_IO.loadVectorFromDisk( targetDir + "_" + str(i)+ "_rm",datatype) myNet.connectLayers() # inputData = train_GWAS[0] # outPutdata = train_y[0] def performGradientCheck(myNet, inputData, outPutdata) : # the net, Standardised SNPs, and y # Gradient Test grad_current = myNet.getCurrentWeightGradients(inputData, outPutdata) numgrad = myNet.gradientCheck(inputData, outPutdata) myNorm = norm(grad_current-numgrad)/norm(grad_current+numgrad) return(myNorm )
mkelcb/knet
knet/com/application/logic/knet/knet_manager.py
Python
mit
32,687
[ "NEURON" ]
0f1e47fc6d2a64d1b9313dbd4030e2a1d552c84b26af930107ce343e8ecc8cb9
# This code is so you can run the samples without installing the package import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q" tags = "particles, Galaxy" import pyglet import cocos from cocos.director import director from cocos.actions import * from cocos.layer import * from cocos.particle_systems import * class L(Layer): def __init__(self): super( L, self).__init__() # p = Fireworks() # p = Explosion() # p = Fire() # p = Flower() # p = Sun() # p = Spiral() # p = Meteor() p = Galaxy() p.position = (320,240) self.add( p ) def main(): director.init( resizable=True ) main_scene = cocos.scene.Scene() main_scene.add( L() ) director.run( main_scene ) if __name__ == '__main__': main()
eevee/cocos2d-mirror
test/test_particle_galaxy.py
Python
bsd-3-clause
911
[ "Galaxy" ]
b6fcb1d08f1f47b2c4c9089369ee9a3fb9d1e9bd2dd6af353b8dbdea3be07b36
import os import tempfile from ase.io import write import ase.parallel as parallel from ase.old import OldASEListOfAtomsWrapper def view(atoms, data=None, viewer='ag', repeat=None, block=False): # Ignore for parallel calculations: if parallel.size != 1: return if hasattr(atoms, 'GetUnitCell'): # Convert old ASE ListOfAtoms to new style. atoms = OldASEListOfAtomsWrapper(atoms).copy() vwr = viewer.lower() if vwr == 'ag': format = 'traj' if repeat is None: command = 'ag' else: command = 'ag --repeat=%d,%d,%d' % tuple(repeat) repeat = None elif vwr == 'vmd': format = 'cube' command = 'vmd' elif vwr == 'rasmol': format = 'pdb' command = 'rasmol -pdb' elif vwr == 'xmakemol': format = 'xyz' command = 'xmakemol -f' elif vwr == 'gopenmol': format = 'xyz' command = 'rungOpenMol' elif vwr == 'avogadro': format = 'cube' command = 'avogadro' else: raise RuntimeError('Unknown viewer: ' + viewer) fd, filename = tempfile.mkstemp('.' + format, 'ase-') fd = os.fdopen(fd, 'w') if repeat is not None: atoms = atoms.repeat() if data is None: write(fd, atoms, format=format) else: write(fd, atoms, format=format, data=data) fd.close() if block: os.system('%s %s' % (command, filename)) else: os.system('%s %s &' % (command, filename)) os.system('(sleep 60; rm %s) &' % filename)
slabanja/ase
ase/visualize/__init__.py
Python
gpl-2.0
1,574
[ "ASE", "Avogadro", "RasMol", "VMD" ]
43875aac58a66ed75f72aad8b3fdf35edc4075e10423a0118eaf0fc2359651a3
import os import re import sys from string import * import commands import subprocess try: import cPickle as pickle except: import pickle from copy import copy, deepcopy from math import log as math_log, sqrt import random NUMBERS='0123456789' DNA_LETT='ACGT' SS_LETT='0SETBH' AA_LETT='ACDEFGHIKLMNPQRSTVWYUOB' # including selenocysteine AA_LETT_STRICT='ACDEFGHIKLMNPQRSTVWY' RNA_LETT='ACGU' three_letter_codon_diz={ 'Ala':'A' , 'Cys':'C' , 'Asp':'D' , 'Glu':'E' , 'Phe':'F' , 'Gly':'G' , 'His':'H' , 'Ile':'I' , 'Lys':'K' , 'Leu':'L' , 'Met':'M' , 'Asn':'N' , 'Pro':'P' , 'Gln':'Q' , 'Arg':'R' , 'Ser':'S' , 'Thr':'T' , 'Val':'V' , 'Trp':'W' , 'Tyr':'Y', '***':'*', 'Unk':'X' , '<->':'-', '---':'-','Asx':'B', 'SeC':'U', 'Zed':'Z', 'SeC(e)':'U' } one_to_three_letter_codon_diz={ 'A':'Ala' , 'C':'Cys' , 'D':'Asp' , 'E':'Glu' , 'F':'Phe' , 'G':'Gly' , 'H':'His' , 'I':'Ile' , 'K':'Lys' , 'L':'Leu' , 'M':'Met' , 'N':'Asn' , 'P':'Pro' , 'Q':'Gln' , 'R':'Arg' , 'S':'Ser' , 'T':'Thr' , 'V':'Val' , 'W':'Trp' , 'Y':'Tyr', '*':'***', 'X':'Unk' , '-':'---' , 'B':'Asx' } STOP_CODONS= ['UAA', 'UAG', 'UGA', 'URA', 'UAR'] STOP_CODONS_DNA=['TAA', 'TAG', 'TGA', 'TRA', 'TAR'] one_letter_to_three_letter_aa_diz={'A':'alanine', 'R':'arginine', 'N':'asparagine','D':'aspartic_acid','C':'cysteine','E':'glutamic_acid','Q':'glutamine','G':'glycine','H':'histidine','I':'isoleucine','L':'leucine','K':'lysine','M':'methionine','F':'phenylalanine','P':'proline','S':'serine','T':'threonine','W':'tryptophan','Y':'tyrosine','V':'valine','U':'selenocysteine'} try: from numpy import average, std as std_deviation except: sys.exc_clear() def average(ilist): return sum(ilist)/float(len(ilist)) def std_deviation(ilist): a=average(ilist) return sqrt( sum( [pow( v-a, 2) for v in ilist] )/len(ilist) ) def set_temp_folder(folder): set_MMlib_var('temp_folder', folder) def set_split_folder(folder): set_MMlib_var('split_folder', folder) def get_temp_folder(): return get_MMlib_var('temp_folder') def get_split_folder(): 
return get_MMlib_var('split_folder') def set_local_folders(temp='/tmp'): """ Used in ipython to quickly set the environment for fetching chromosomes and other stuff that required temp files""" try: assert is_directory(opt['temp']) except: opt['temp']=temp temp_folder=Folder(random_folder(opt['temp'])); test_writeable_folder(temp_folder, 'temp_folder'); set_temp_folder(temp_folder) split_folder=Folder(opt['temp']); test_writeable_folder(split_folder); set_split_folder(split_folder) def mute(also_stderr=False): """ Turns off any output to stdout (to stderr as well if option is True). To go back to normal , use unmute()""" sys.stdout = open(os.devnull, "w") if also_stderr: sys.stderr = open(os.devnull, "w") def unmute(): sys.stdout = sys.__stdout__; sys.stderr = sys.__stderr__ def phylome_connector(): """ ete2 connector to PhylomeDB. allows retrieving data such as trees, alignments etc. Examples: (calling p the object returned by this function) p.get_algs("Phy0005QIK_DROME", 8) p.get_tree("Phy0005QIK_DROME", 8, best_tree = True) """ import ete2 p = ete2.PhylomeDB3Connector(host = "phylomedb.org", user = "phyReader", passwd = "phyd10.-Reader", db = 'phylomedb_3') p._algs = "alignment" p._trees = "tree" p._phylomes = "phylome" p._phy_content = "phylome_content" return p def bash(command, print_it=0): """Utility to run bash commands. a tuple (exit_status, message) is returned, where message includes both std input and stderr. If argument print_it==1 or the variable print_commands is defined in MMlib, the command is printed before execution. If variable bin_folder is defined in MMlib, this folder is added to bash $PATH before running the command. 
""" if 'print_commands' in globals() and print_commands: print_it=1 if print_it: write(command, 1) if 'bin_folder' in globals(): if not bin_folder == os.environ['PATH'].split(':')[0]: os.environ['PATH']=str(bin_folder)+':'+os.environ['PATH'] b1, b2= commands.getstatusoutput(command) return [b1, b2] def bbash(command, print_it=0, dont_die=0): """Utility to run bash commands. A string is returned, where including both std input and stderr. If the exit status of the command is different than 0, it is assumed something went wrong, so an exception is raised indicating the command and the output. If argument dont_die==1, no exception is raised and output is returned as normal. If argument print_it==1 or the variable print_commands is defined in MMlib, the command is printed before execution. If variable bin_folder is defined in MMlib, this folder is added to bash $PATH before running the command. """ if 'print_commands' in globals() and print_commands: print_it=1 if print_it: write(command, 1) cmnd=command if 'bin_folder' in globals(): if not bin_folder == os.environ['PATH'].split(':')[0]: os.environ['PATH']=str(bin_folder)+':'+os.environ['PATH'] bb=commands.getstatusoutput(cmnd) if bb[0]!=0 and not dont_die: raise Exception, 'COMMAND: ' + command+' ERROR: "'+bb[1]+' "' else: return bb[1] def bash_pipe(cmnd, print_it=0, return_popen=0, stdin=None): """ Open a filehandler which reads from a pipe opened in bash, given a command. Useful when you want to read one line at the time. If variable bin_folder is defined in MMlib, this folder is added to bash $PATH before running the command. stdin can be used to input large chunks of data, through a filehandler. 
valid formats are: stdin=existing_write_filehandler, or stdin='PIPE' (equivalent to subprocess.PIPE); in this last case you want to use return_popen to be able to access the handler (with this option, the popen object is returned instead of its stdout filehandler), like this: p=bash_pipe('command', stdin='PIPE', return_popen=True) print >> p.stdin, 'input_lines!' #repeat as many times as needed p.stdin.close() # important! as most programs wait for a EOF signal to give output, you need to close the input filehandler before reading the lines of output p.stdout.readline() # --> now you can read the output lines from this handler """ if stdin=='PIPE': stdin=subprocess.PIPE if 'print_commands' in globals() and print_commands: print_it=1 if 'bin_folder' in globals(): if not bin_folder == os.environ['PATH'].split(':')[0]: os.environ['PATH']=str(bin_folder)+':'+os.environ['PATH'] if print_it: write(cmnd, 1) s=subprocess.Popen(cmnd.split(), stdout=subprocess.PIPE, stdin=stdin, env=os.environ) if return_popen: return s else: return s.stdout def md5_executable(): b=bash('echo | md5sum') if not b[0]: return 'md5sum' ## command found, no error b=bash('echo | md5') if not b[0]: return 'md5' ## command found, no error else: raise Exception, "ERROR neither md5sum or md5 found on this system!" def checksum(ffile, is_text=False): """ Returns the checksum for the file in input """ if is_text: pipe=bash_pipe(md5_executable()+' ', return_popen=1, stdin='PIPE') print >> pipe.stdin, ffile pipe.stdin.close() m= pipe.stdout.readline().split()[0] else: m=bbash(md5_executable()+' '+ffile).split()[0] return m def Folder(string): if not string: return string ff=string+'/'*int(string[-1]!='/') cmnd='mkdir '+ff bb=bash(cmnd) return ff def random_folder(parent_folder='', dont_create_it=0): if parent_folder: parent_folder = Folder(parent_folder) #checking or creating parent folder. IF CRASHED: do you have writing privileges here? 
a=parent_folder+ bash("date +%F%T%N | "+md5_executable()+" | cut -c 1-32")[1] #creating a random named folder inside the parent folder if dont_create_it: return a+'/' a=Folder(a) if not bash('cd '+a+' ; cd ..')[0] : return a else: return 'some_error_creating_random_folder.hopefully_there_is_noooo_file_named_like_this_when_you_try_to_delete_it' temp_folder=random_folder('/tmp', 1) split_folder=Folder('/tmp') def set_MMlib_var(varname, value): globals()[varname]=value def get_MMlib_var(varname): return globals()[varname] def is_significant(pvalue): return pvalue<opt['alpha'] printed_rchar=0 # trans={}; # trans ['GCA'] = "A"; trans ['GCC'] = "A"; trans ['GCG'] = "A"; trans ['GCT'] = "A"; trans ['TGC'] = "C"; trans ['TGT'] = "C"; # trans ['GAC'] = "D"; trans ['GAT'] = "D"; trans ['GAA'] = "E"; trans ['GAG'] = "E"; trans ['TTC'] = "F"; trans ['TTT'] = "F"; # trans ['GGA'] = "G"; trans ['GGC'] = "G"; trans ['GGG'] = "G"; trans ['GGT'] = "G"; trans ['CAC'] = "H"; trans ['CAT'] = "H"; # trans ['ATA'] = "I"; trans ['ATC'] = "I"; trans ['ATT'] = "I"; trans ['AAA'] = "K"; trans ['AAG'] = "K"; trans ['TTA'] = "L"; # trans ['TTG'] = "L"; trans ['CTA'] = "L"; trans ['CTC'] = "L"; trans ['CTG'] = "L"; trans ['CTT'] = "L"; trans ['ATG'] = "M"; # trans ['AAC'] = "N"; trans ['AAT'] = "N"; trans ['CCA'] = "P"; trans ['CCC'] = "P"; trans ['CCG'] = "P"; trans ['CCT'] = "P"; # trans ['CAA'] = "Q"; trans ['CAG'] = "Q"; trans ['AGA'] = "R"; trans ['AGG'] = "R"; trans ['CGA'] = "R"; trans ['CGC'] = "R"; # trans ['CGG'] = "R"; trans ['CGT'] = "R"; trans ['AGC'] = "S"; trans ['AGT'] = "S"; trans ['TCA'] = "S"; trans ['TCC'] = "S"; # trans ['TCG'] = "S"; trans ['TCT'] = "S"; trans ['ACA'] = "T"; trans ['ACC'] = "T"; trans ['ACG'] = "T"; trans ['ACT'] = "T"; # trans ['GTA'] = "V"; trans ['GTC'] = "V"; trans ['GTG'] = "V"; trans ['GTT'] = "V"; trans ['TGG'] = "W"; trans ['TAC'] = "Y"; # trans ['TAT'] = "Y"; # trans ['taa'] = "!"; trans ['tag'] = "#"; trans ['tga'] = "@"; # trans ['TAA'] = "*"; 
trans ['TAG'] = "*"; trans ['TGA'] = "*"; # trans ['---'] = "-"; ## std: FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG # build alternative genetic code translation tables based on NCBI codes genetic_codes={} genetic_codes_AAs={ 1:'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 2:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG', 3:'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 4:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 5:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG', 6:'FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 9:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG', 10:'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 11:'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 12:'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 13:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG', 14:'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG', 16:'FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 21:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG', 22:'FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 23:'FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 24:'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSSKVVVVAAAADDEEGGGG', 25:'FFLLSSSSYY**CCGWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 26:'FFLLSSSSYY**CC*WLLLAPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 27:'FFLLSSSSYYQQCCWWLLLAPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 28:'FFLLSSSSYYQQCCWWLLLAPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 29:'FFLLSSSSYYYYCC*WLLLAPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 30:'FFLLSSSSYYEECC*WLLLAPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG', 31:'FFLLSSSSYYEECCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'} for gc_code in genetic_codes_AAs: i=-1 
genetic_codes[gc_code]={'---':'-'} for a in 'TCAG': for b in 'TCAG': for c in 'TCAG': i+=1 genetic_codes[gc_code][ a+b+c ]=genetic_codes_AAs[gc_code][i] trans=genetic_codes[1] retrotrans={} for codon in trans: retrotrans.setdefault( trans[codon], [] ).append(codon) for aa in retrotrans: retrotrans[aa].sort() species_code_file="/home/mmariotti/software/selenoprofiles/libraries/species_codes.tab" genome_config="/home/mmariotti/software/selenoprofiles/libraries/genome.config" def contain_chars(string, to_check=uppercase+lowercase): for char in string: if char in to_check: return 1 return 0 def is_number(string, mode='int'): if mode=='float': try: float(string) return True except ValueError: return False else: #mode =int try: int(string) return True except ValueError: return False def is_option(s): return (s[0]=='-' and contain_chars(s[1:])) def option_value(value): """value=string; returns value changed into the appropriate type. """ # if value.startswith('[') and value.endswith(']'): # write(value, 1, how='yellow') # return [option_value(x) for x in value[1:-1].split(', ') ] if is_number(value): return int(value) elif is_number(value, 'float'): return float(value) elif value=='None': return None else: if value and value[0]==value[-1] and value[0] in ['"', "'"] and len(value)>=2: value=value[1:-1] return value def update_opt(new_opt, current_opt): """sometimes it is useful to read options from command line, then manipulate them before using them. In this case, it is worth doing to update the opt object and sys.argv. Each option in new_opt (key) goes to replace the one in current_opt with the same name. 
Sys.argv is also updated.DONT KNOW ABOUT BOOLEAN VALUES """ for k in new_opt: current_opt[k]=new_opt[k] for c in range(len(sys.argv)): if sys.argv[c]=='-'+k: sys.argv[c+1]=new_opt[k] return current_opt def fill_option(default_opt, option, dont_add_to_argv=0): #fill the keys of option (dictionary like {option: value}) that are not present, with default values specified in default_opt for key in default_opt: if not option.has_key(key): option[key]=default_opt[key] if not dont_add_to_argv: sys.argv.extend(['-'+key, str(default_opt[key])]) return option class options(dict): """ """ def __init__(self, dict_in={}, synonyms={}): super(options, self).__init__() for k in dict_in: self[k]=dict_in[k] self.set_synonyms(synonyms) def __getitem__(self, name): if self.has_key(name): return super(options, self).__getitem__(name) elif self['__synonyms__'].has_key(name): return self[ self['__synonyms__'][name] ] elif name in self['__synonyms__'].values(): for k in self['__synonyms__']: if name== self['__synonyms__'][k]: return self[k] else: return None def set_synonyms(self, syno_hash): self['__synonyms__']=syno_hash def add_synonym(synonym, key): self['__synonyms__'][synonym]=key def synonyms(self): return self['__synonyms__'] def command_line(default_opt, command_line_help='Command line usage:...', default_order='io', synonyms={}, dont_die=0, silent=0, dont_add_to_argv=0, nowarning=0, strict=0, tolerated_regexp=[], advanced={}): import sys def command_line_option(): """this is a utility that read sys.argv and returns a dictionary like {option: value} options in command line must be preceded by -, and option can't consist only of number ; if after an option there is no value (for example there is another option), the value is set to 1. Numberic values are converted to their types; floating point numbers are discriminated from integers if they contains a dot '.' 
ex: [python] prog_name.py -c 12 -verbose -gap -0.45 ----> returns {'c': 12, 'g': -0.45, 'verbose':1} The default_opt contains all default values for compulsory arguments. These are taken from here if they are not find in the commandline. Also, sys.argv is enriched with these value, therefore if the command_line function is called again (even with specifying def_opt), the resulting returned opt will be the same. """ option=options() llist=sys.argv[1:]+['-EnD!!'] # write_to_file(str( sys.argv), 'btemp') c=0 #flushing empty values in argv while c<len(llist): if not llist[c]: llist.pop(c) else: c+=1 while llist[0]!='-EnD!!': first=str(llist.pop(0)) #option=option_value(value) if first[0]=='-': if is_option(llist[0]): #the next object in the row is a option. So this option is simply set to True (1) option[first[1:]]=1 else: value=llist.pop(0) option[first[1:]]=option_value(value) return option if silent: nowarning=1 opt=command_line_option() if default_order=='*': obj_list=[] i=1 while i<len(sys.argv) and ( not is_option(sys.argv[i]) ) : #reading as syntax is --> program_name file1 file2 file3 .... fileN -o 1 -n 7 obj_list.append( sys.argv[i] ) i+=1 if not obj_list and i<len(sys.argv): #it may be that the syntax used in the command line is also: program_name -o 1 -n 7 file1 file2 file3 .... 
i=1 while i<len(sys.argv): if not is_option(sys.argv[i]) and (not is_option(sys.argv[i-1])): obj_list.append( sys.argv[i] ) i+=1 opt['*']=obj_list else: i=1 while i<len(sys.argv) and i<=len(default_order) and ( not is_option(sys.argv[i]) ) : opt[ default_order[i-1] ]=option_value(sys.argv[i]) i+=1 synonyms['help']='h'; synonyms['-help']='h' opt.set_synonyms(synonyms) special_options=['h', 'print_opt', 'print_option', 'print_options', 'config', 'bin_folder', '__synonyms__'] for k in opt: if not default_opt.has_key(k) and not nowarning and not k in special_options and not k in opt.synonyms() and not match_any_word(k, tolerated_regexp, ignore_case=0) : if strict!=0: if type(strict) in [int, bool]: e=Exception elif issubclass(strict, Exception): e=strict raise e, "command_line ERROR the option "+k+" is not present among the possible options. run with -print_opt to see the current option configuration" printerr('WARNING possible typo: the option '+k+' is not present among the possible options. 
run with -print_opt to see the current option configuration\n') opt=fill_option(default_opt, opt, dont_add_to_argv) for k in opt: if type(opt[k]) == str and opt[k].startswith('$('): opt[k]=bbash(opt[k][2:-1]) #dealing with synonyms keyss=opt.keys() for k in keyss: if k in opt.synonyms(): opt[opt.synonyms()[k]]=opt[k] del opt[k] #printing options to screen in case we have -print_opt if not silent and( opt.has_key('print_opt') or opt.has_key('print_options') or opt.has_key('print_option') ): write( "| ACTIVE OPTIONS:", 1) keys=opt.keys() keys.sort() for k in keys: a="| "+str(k) write( a+' '*(30-len(a))+': '+str(opt[k]), 1) write('', 1) #printing help message in case we have -h or --help if not silent and (len(sys.argv)<2 or opt.has_key('h') ) : write(command_line_help, 1) if advanced and opt['h'] in advanced: write(advanced[opt['h']], 1) if not dont_die: sys.exit() return opt uniq_id=id def lineage_string_to_abstract(lineage): """ lineage is a string which is usually returned by this program. This function condensate it keeping the most interesting classes. 
""" splt=lineage.split('; ') if "Bacteria; " in lineage : return 'B; '+join(splt[2:min(5, len(splt))], '; ') elif 'Archaea; ' in lineage : return 'A; '+join(splt[2:min(5, len(splt))], '; ') elif 'Eukaryota; ' in lineage: out='E; ' if 'Metazoa; ' in lineage: out+='M; ' if 'Deuterostomia; ' in lineage: out+='Deuterostomia; ' if 'Vertebrata; ' in lineage: out+='Vertebrata; ' if 'Mammalia; ' in lineage: out+='Mammalia; ' elif 'Sauropsida; ' in lineage: out+='Sauropsida; ' elif 'Amphibia; ' in lineage: out+='Amphibia; ' elif 'Actinopterygii; ' in lineage: out+='Actinopterygii; ' elif 'Chondrichthyes; ' in lineage: out+='Chondrichthyes; ' elif 'Tunicata; ' in lineage: out+='Tunicata; ' if 'Ascidiacea; ' in lineage: out+='Ascidiacea; ' elif 'Branchiostomidae; ' in lineage: out+='Branchiostomidae; ' elif 'Echinodermata; ' in lineage: out+='Echinodermata; ' elif 'Protostomia; ' in lineage: out+='Protostomia; ' if 'Arthropoda; ' in lineage: out+='Arthropoda; ' if 'Insecta; ' in lineage: out+='Insecta; ' elif 'Crustacea; ' in lineage: out+='Crustacea; ' elif 'Myrapoda; ' in lineage: out+='Myrapoda; ' elif 'Arachnida; ' in lineage: out+='Arachnida; ' elif 'Merostomata; ' in lineage: out+='Merostomata; ' elif 'Nematoda; ' in lineage: out+='Nematoda; ' elif 'Mollusca; ' in lineage: out+='Mollusca; ' if 'Gastropoda; ' in lineage: out+='Gastropoda; ' elif 'Bivalvia; ' in lineage: out+='Bivalvia; ' elif 'Annelida; ' in lineage: out+='Annelida; ' else: out+= lineage.split('Protostomia; ')[1].split(';')[0]+'; ' else: #basal metazoan if 'Cnidaria; ' in lineage: out+='Cnidaria; ' elif 'Porifera; ' in lineage: out+='Porifera; ' elif 'Ctenophora; ' in lineage: out+='Ctenophora; ' elif 'Placozoa; ' in lineage: out+='Placozoa; ' elif 'Platyhelminthes; ' in lineage: out+='Platyhelminthes; ' else: out+= join(splt[2:min(4, len(splt))], '; ')+'; ' return out[:-2] else: return join(splt[0:min(4, len(splt))], '; ') def get_species_fullname(species_name): b=bash('egrep -w "'+species_name+'" 
'+species_code_file) if b[0]: raise Exception, "get_species_fullname ERROR: "+species_name+' not found' else: return b[1].split('\t')[1] def get_species_code(species_name): b=bash('egrep -w "'+species_name+'" '+species_code_file) if b[0]: raise Exception, "get_species_code ERROR: "+species_name+' not found' else: return b[1].split('\t')[0] def get_genome_file(species_fullname): b=bash('egrep "'+species_fullname+'.*=" '+genome_config) if b[0]: raise Exception, "get_genome_file ERROR: "+species_fullname+' not found' else: return del_white(b[1].split('=')[1]) def second_max(alist): """returns the second biggest number in a list. if it has only one element, that is returned. If it has none, it returns False """ current_max='init' current_second_max='init' for item in alist: if current_max=='init' or item>current_max: current_second_max=current_max current_max=item elif current_second_max=='init' or item>current_second_max: current_second_max=item if current_second_max=='init': if current_max=='init': return False return current_max return current_second_max blosum62_matrix={} def load_blosum(from_file="/home/mmariotti/software/selenoprofiles/libraries/BLOSUM62sel"): try: assert blosum62_matrix except: #ncbi format ordered_aminoacids=[] main_diz={} cfile=open(from_file, 'r') cline=cfile.readline() index_line=0 #fake, check below while cline: if cline[0]=='#': cline=cfile.readline() elif not ordered_aminoacids: ordered_aminoacids=cline.split() index_line=-1 else: for index_aa, num in enumerate(cline.split()[1:]): if not main_diz.has_key(ordered_aminoacids[index_line]): main_diz[ordered_aminoacids[index_line]]={} main_diz[ordered_aminoacids[index_line]][ordered_aminoacids[index_aa]]=int(num) cline=cfile.readline() index_line+=1 cfile.close() main_diz['U']=main_diz['*'] for k in main_diz: main_diz[k]['U']=main_diz[k]['*'] blosum62_matrix=main_diz return blosum62_matrix def blosum(aa1, aa2, matrix={}): if not matrix: matrix=load_blosum() if 'x' in [aa1, aa2]: #for my 
exonerate parser. actual Xs (uppercase) are treated as in the blosum return 0 #for my recoding of stop codons if aa1 in 'JZ': aa1='*' if aa2 in 'JZ': aa2='*' if matrix.has_key(aa1) and matrix[aa1].has_key(aa2): return matrix[aa1][aa2] else: raise Exception, "ERROR blosum score not defined for:"+aa1+' '+aa2 def similar_aas(aa1, aa2): '''returns True is the two aas are similar, false if not. They are defined as similar if they have a positive value in the blosum62 matrix ''' similar_diz={"A":["S"], "R":["Q","K"], "N":["D","H","S","B"], "D":["N","E","B","Z"], "C":[], "Q":["R","E","K","Z"], "E":["D","Q","K","B","Z"], "G":[], "H":["N","Y"], "I":["L","M","V"], "L":["I","M","V"], "K":["R","Q","E","Z"], "M":["I","L","V"], "F":["W","Y"], "P":[], "S":["A","N","T"], "T":["S"], "W":["F","Y"], "Y":["H","F","W"], "V":["I","L","M"]} if similar_diz.has_key(aa1): if similar_diz.has_key(aa2): return (aa2 in similar_diz[aa1]) return False def parsed_blast_to_summary(pline, program='tblastn', chars_per_line=60 ): return blasthit(pline).pretty_summary() def all_chars_in(astring): """ This function returns the list of characters contained in the input string. The characters are in the order of first appearance""" outlist=[]; chars_hash={} for c in astring: if not chars_hash.has_key(c): outlist.append(c) chars_hash[c]=1 return outlist def find_all(substring, sstring): """ Find all arg1 occunreces in arg2 , and returns their indices. work with overlapping occurrences """ l=len(substring); out=[] for pos in range(len(sstring)+1-l): if sstring[pos:pos+l]==substring: out.append(pos) return out default_genetic_code=1 def set_genetic_code(code): """ Set the default translation table to this code; Input is numerical, follows NCBI standards (e.g. 1 is standard, 6 is ciliate). 
  This affects later calls of transl(seq) """
  return set_MMlib_var('default_genetic_code', code)

def get_genetic_code():
  """ Get the default translation table code; numerical, follows NCBI standards"""
  return get_MMlib_var('default_genetic_code')

def get_genetic_code_table(code=None):
  """ Returns a dictionary codon->aminoacid for the given code (numerical, NCBI standard)
  If code is not provided, the default set in MMlib is used"""
  # genetic_codes is a module-level table defined elsewhere in this file.
  if code is None: code=get_genetic_code()
  return genetic_codes[code]

def transl(cds_seq, include_selenocysteine=False, gaps_to=None, code=None):
  '''translate a nucleotide sequence in aminoacids. Use code=X to give a integer identifying the genetic code to be used, as NCBI codes (see https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi)
  Use include_selenocysteine=1 to use U for TGA or UGA
  Use gaps_to to force a certain char for gaps codons (---); normally translated as -  '''
  if code is None: code=default_genetic_code
  out=''
  i=0
  codon_table=genetic_codes[code]
  while i < len(cds_seq):
    # normalize to uppercase DNA: RNA 'U' becomes 'T' before table lookup
    codon= replace_chars(upper(cds_seq[i:i+3]), 'U', 'T')
    if include_selenocysteine and codon=='TGA':  out+='U'
    elif gaps_to and codon=='---':               out+=gaps_to
    elif codon_table.has_key(codon):             out+=codon_table[codon]
    else:                                        out+='X'   # unknown / partial codon
    i+=3
  return out

def retrotransl(aa_seq, gaps_to='', codon_hash={}):
  """translate an aminoacid sequence back to coding sequence. The first codon in alphabetical order is considered, unless a different codon_hash is provided.
  Argument gaps_to can be used to provide the character for gaps. Notice that a three character argument should be provided"""
  # retrotrans is a module-level aa->codon(s) table defined elsewhere.
  if not codon_hash: codon_hash=retrotrans
  out=''
  if len(gaps_to)==1: gaps_to*=3   # accept a 1-char gap symbol, expand to a codon
  if gaps_to and len(gaps_to)!=3: raise Exception, "retrotransl ERROR gaps_to should be a string with length 3 or 1! gaps_to="+str(gaps_to)
  for aa in aa_seq:
    aa=upper(aa)
    if gaps_to and aa=='-':  out+=gaps_to
    elif codon_hash.has_key(aa):
      codon=codon_hash[aa]
      if type(codon)==list:  codon=codon[0] #taking first in alphabetical order
      out+=codon
    else: out+='NNN'   # unknown aminoacid -> ambiguous codon
  return out

class e_v:
  """ Class for coping with evalues without going crazy because of out of memory """
  # Wraps a BLAST e-value string; self.value is the float, or the sentinel '!'
  # when the string cannot be parsed as a float.
  def __init__(self, anything):
    self.string=str(anything)
    self.value='!'
    try:    self.value=float(self.string)
    except ValueError: "nothing"   # deliberate no-op: keep the '!' sentinel
  def exponent(self):
    # Base-10 exponent of the e-value; 0 is mapped to -1000 as "minus infinity".
    if 'e' in self.string:  return int(self.string.split('e')[1])
    else:
      if self.value =='!': raise ValueError
      if self.value==0:  return -1000
      elif self.value<1:
        # count leading zeros of the decimal part
        a= str(self.value).split('.')[1]
        exp=-1
        while a and a[0]=='0':
          exp-=1
          a=a[1:]
        return exp
      elif self.value>=1:
        return len(str(self.value).split('.')[0])-1
  def is_minor_than(self, other_e_v, or_equal=0):
    # Compare by exponent first; fall back to float comparison on ties.
    if type(other_e_v).__name__ in ["str",'int','float']: #converting if necessary
      other_e_v=e_v(str(other_e_v))
    if self.exponent() < other_e_v.exponent():    return True
    elif self.exponent() > other_e_v.exponent():  return False
    else:
      if self.string == other_e_v.string:  return or_equal
      else:
        if self.value== "!" or other_e_v.value== "!":
          raise ValueError, "ERROR in evalue class! can't compare the two evalues: "+self.string+' AND '+other_e_v.string
        else:
          if or_equal: return self.value <= other_e_v.value
          else:        return self.value <  other_e_v.value
  def __lt__(self, other):   return self.is_minor_than(other)
  def __le__(self, other):   return self.is_minor_than(other, or_equal=1)
  def __gt__(self, other):   return not self.is_minor_than(other, or_equal=1)
  def __ge__(self, other):   return not self.is_minor_than(other)
  def __repr__(self):  return self.string
  def __str__(self):   return self.string
  def __float__(self): return self.value
  def __int__(self):   return int(self.value)
  def __eq__(self, other):
    # equality as "<= and not <" so string formatting differences don't matter
    return self.is_minor_than(other, or_equal=1) and not self.is_minor_than(other)

def shortcut_log(evalue):
  """ utility for evalues e_v ...
  defining the log(0) as 200, since I think blast is not computing evalues < than e-200"""
  # Returns the (clipped, non-negative) negative base-10 log of an e-value;
  # math_log is the math.log alias used by this module.
  try:
    if 'e' in str(evalue):   return -int(str(evalue).split('e')[1])
    elif evalue>=1:          return 0
    elif evalue==0.0:        return 200
    else:
      a= -int(round(math_log(evalue, 10)))
      if a>0: return a
      else:   return 0
  except:
    print "____________ERROR: ",[evalue], type(evalue)

def match_any_word(main_string, word_list, is_pattern=1, ignore_case=1):
  """ Given a string and a list of strings/perl_patterns, it returns 1 is any of them matches the string, 0 otherwise """
  for w in word_list:
    try:
      if is_pattern:
        if ignore_case:  pattern=re.compile(w, re.IGNORECASE)
        else:            pattern=re.compile(w)
        if pattern.search(main_string): return 1
      else:
        if ignore_case:
          if w.lower() in main_string.lower(): return 1
        else:
          if w in main_string: return 1
    except:
      printerr('ERROR with pattern: '+w)
      raise
  return 0

def score_tags(blast_evalues_and_titles, positive=[], negative=[], neutral=[], verbose=0, max_n_titles=0):
  """Input: list of objects [evalue, title] coming from parsing a blast file; evalue should be a e_v class object. titles should be complete (not just gis).
  Other arguments are: positive, negative, neutral tags, to be searched in the titles. The lines matching a neutral tag are skipped. Lines matching a negative tag (or no positive tag) are score negatively. Lines matching a positive tag are scored positively. The score depends on the negative logarithm of the evalue.  """
  score=0
  if max_n_titles==0: max_n_titles=len(blast_evalues_and_titles)
  for evalue, title in blast_evalues_and_titles[:max_n_titles]:
    score_of_this_tag=shortcut_log(evalue)
    if not match_any_word(title, neutral):
      if match_any_word(title, negative):
        score-=score_of_this_tag
        if verbose: print 'NEGATIVE_M ',title, evalue, '*-'+str(score_of_this_tag)
      elif match_any_word(title, positive):
        score+=score_of_this_tag
        if verbose: print 'POSITIVE ',title, evalue, '*+'+str(score_of_this_tag)
      else:
        # no positive tag matched -> counted as negative
        if verbose: print 'NEGATIVE ',title, evalue, '*-'+str(score_of_this_tag)
        score-=score_of_this_tag
    elif verbose: print 'NEUTRAL', title, evalue, '/'+str(score_of_this_tag)
  return score

def dict_to_py_obj(sstring):
  """Take as input a string, which is a dictionary (hash) as printed usually by python, like : {'a':'asdasfas', 'b': 'asfdasf'}   and return a copy of the original object that was run. """
  # Hand-rolled parser: consumes sstring left to right, alternating key / value.
  # option_value (defined elsewhere) casts unquoted tokens to int/float/str.
  hash_out={}
  if sstring[0]=='{': sstring=sstring[1:]
  while sstring and not sstring[0]=='}':
    #key
    if sstring[0] in ["'", '"']: #key is string
      key=sstring[1:].split(sstring[0]+ ":")[0]
      sstring=sstring[1+len(key)+2+1:]
    else:
      value_string=sstring.split(":")[0]
      key=option_value(value_string)
      sstring=sstring[len(value_string)+1:]
    #value
    if sstring[0] in ["'", '"']: #value is a string
      if sstring[1:].split(sstring[0]+",")[0]==sstring[1:]: #last value
        value=sstring[1:].split(sstring[0]+"}")[0]
        sstring=''
      else:
        value=sstring[1:].split(sstring[0]+",")[0]
        sstring=sstring[1+len(value)+2+1:]
    else:
      value_string =sstring.split(',') [0]
      if value_string ==sstring.split(',') [0] ==sstring: #last value
        value=option_value(sstring[1:].split("}")[0])
        sstring=''
      else:
        value= option_value(value_string )
        sstring=sstring[len(value_string)+2:]
    hash_out[key]=value
  return hash_out

def dict_to_config_file(diz, fileout=''):
  """Take as input a dictionary (typically a opt dictionary) and return a string corresponding to a configuration file that would be loaded in memory as the input dictionary.  """
  out=''
  for k in diz:  out+='\n'+str(k)+' = '+str(diz[k])
  if out: out=out[1:]   # drop leading newline
  if fileout:  write_to_file(out, fileout)
  return out

def float_generalized(stringg):
  # float() that also accepts bare exponents such as 'e-10' (read as 1e-10).
  try:
    a=float(stringg)
    return a
  except ValueError:
    if stringg[0]=='e': return float('1'+stringg)

def replace_chars(astring, chars_list, replace_to_this=''):
  # Replace (or delete, by default) every character of chars_list in astring.
  return ''.join([c if not c in chars_list else replace_to_this for c in astring])
#  out=''
#  for c in astring:
#    if not c in chars_list:   out+=c
#    else:                     out+=replace_to_this
#  return out

def debug(msg):
  # Disabled debug hook: body is intentionally a no-op.
  # if opt.has_key('v') and opt['v']:
  'nothing'
  # print msg

# Global option hash and keyword-coloring table used by write/printerr/service.
opt=options()
colored_keywords={}

def printerr(msg, put_newline=0, how='', keywords={}, is_service=False):
  # stderr counterpart of write(): erases any pending service() message,
  # applies terminal color codes, and mirrors the uncolored text to log_file
  # (module global) when present and the message is not a service one.
  global printed_rchar
  if not keywords and colored_keywords: keywords=colored_keywords
  msg=str(msg)
  if put_newline: msg=msg+'\n'
  no_color_msg=msg
  if printed_rchar:
    # wipe the previously printed service() message
    sys.stderr.write('\r'+printed_rchar*' '+'\r' )
    printed_rchar=0
  if sys.stdout.isatty() and not opt['no_colors']:
    if how:
      for c in how.split(','):
        if not terminal_codes.has_key(c): raise Exception, "ERROR option 'how' for write was not recognized: "+str(c)+' ; possible values are: '+join([i for i in terminal_codes.keys() if i], ',')
        msg=terminal_codes[c]+msg+terminal_codes['']
    for word in keywords:
      code=''
      for c in keywords[word].split(','): code+=terminal_codes[c]
      msg= replace(msg, word, code+word+terminal_codes[''])
  sys.stderr.write(str(msg))
  if not is_service and 'log_file' in globals(): print >> log_file, str(no_color_msg),

def service(msg):
  """ see write function"""
  # Prints a transient status line on stderr (overwritten by the next
  # service/write call); suppressed when quiet mode opt['Q'] is on.
  msg=str(msg)
  global printed_rchar, opt
  if sys.stderr.isatty() and not opt['Q']:
    if printed_rchar:  printerr('\r'+printed_rchar*' ', is_service=True )
    printerr( "\r"+msg, is_service=True)
    printed_rchar=len(msg)
  #if 'log_file' in globals(): print >> log_file, str(msg+'\n') #putting a newline

def verbose(msg, put_newline=0):
  # Prints only when the verbose option is active.
  # NOTE(review): whether the log_file write is nested under opt['v'] is
  # ambiguous in the extracted text — confirm against the original file.
  global opt
  if put_newline: msg=str(msg)+'\n'
  if opt['v']:
    write( msg )
    if 'log_file' in globals(): print >> log_file, str(msg),
# ANSI escape codes used by write()/printerr(); the empty key is the reset code.
terminal_codes={'':'\033[0m', 'red':'\033[31m', 'green':'\033[32m', 'black':'\033[30m', 'yellow':'\033[33m', 'blue':'\033[34m', 'magenta':'\033[35m', 'cyan':'\033[36m', 'white':'\033[37m', 'bright':'\033[1m', 'dim':'\033[2m', 'underscore':'\033[4m', 'blink':'\033[5m', 'reverse':'\033[7m', 'hidden':'\033[8m'}

def write(msg, put_newline=0, how='', keywords={}):
  """ Function to extend the functionalities of the standard 'print'. First argument (put_newline) when set to 1 put a newline after the string passed, as print would normally do.
  The argument "how" can be given a color to write the message in that color (only for atty terminals). This is prevented if opt['no_colors'] is active.
  The function write is coupled with function "service" which prints service message which are deleted when another service message is printed, or another message is printed with the write function. If you use service, you should only print things with "write".
  Argument keywords allows to use certain colors (or other "how" arguments) for certain keywords. The argument is a hash of keywords and corresponding how arguments.
  for example if you want to highlight all "ERROR" in red, pass keywords={'ERROR':'red'} """
  msg=str(msg)
  global printed_rchar, opt
  if not keywords and colored_keywords: keywords=colored_keywords
  if put_newline: msg=msg+'\n'
  no_color_msg=msg
  if sys.stdout.isatty() and not opt['no_colors']:
    if how:
      for c in how.split(','):
        if not terminal_codes.has_key(c): raise Exception, "ERROR option 'how' for write was not recognized: "+str(c)+' ; possible values are: '+join([i for i in terminal_codes.keys() if i], ',')
        msg=terminal_codes[c]+msg+terminal_codes['']
    for word in keywords:
      code=''
      for c in keywords[word].split(','): code+=terminal_codes[c]
      msg= replace(msg, word, code+word+terminal_codes[''])
  if printed_rchar:
    # wipe the previously printed service() message before writing
    sys.stderr.write('\r'+printed_rchar*' '+'\r' )
    printed_rchar=0
  sys.stdout.write(msg)
  if 'log_file' in globals(): print >> log_file, no_color_msg,
# Alias kept for backward compatibility.
warning=write

def is_empty_file(filename):
  # True when the file exists and its size (stat field 6) is zero.
  return is_file(filename) and os.stat(filename)[6]==0

def is_valid_blast_output(filename):
  # Heuristic: a complete BLAST output mentions 'Lambda' in its footer.
  b=bash('tail -30 '+filename)
  if not b[0] and 'Lambda' in b[1]: return True
  return False

def framesBlastToTranseq( Blastframe, seq_length ):
  ### translate the blast notation of frames to the ebi transeq notation
  if Blastframe>0: return Blastframe
  else:            return -( 1+ ( seq_length+ Blastframe+1 )%3 )

def overlapping( range1, range2):
  # True when the two [min,max] ranges share at least one interior point.
  if max(range1)<=min(range2) or max(range2) <= min(range1) : return False
  else: return True

def positions_to_frame(pos_start, strand='+', chr_length=None):
  """ Tells the frame (in blast notation) of a genomic range. we just need the starting position (which is > the end if strand is negative). The chr_length is needed only if the strand is negative.
  Blast frame notation:
  + positions:    123456789 123456789 123456789
    if starts here: |__|__    |__|__    |__|__
    then frame:       +1        +2        +3
  - positions:    123456789 123456789 123456789
    if starts here: __|__|    __|__|    __|__|
    then frame:       -1        -2        -3
  so the trick for neg strand is: pstart%3 - chrlength%3 -1 --> frame ; it doesn't work only if pstart%3 - chrlength%3 -1 > 0, which is only whe pstart%3 is 2 and chrlength%3 is 0 --> supposed result 1, must be -2. I correct it manually. more elegant solutions would be more expensive anyway, so let's do it this way.  """
  if strand=='+':
    frame=pos_start%3
    if frame==0: frame=3
  elif strand=='-':
    if chr_length is None: raise Exception, "ERROR positions_to_frame: if strand is negative, the chromosome length must be provided to know the frame! "
    frame = pos_start%3 - chr_length%3 -1
    if frame == 1 : frame=-2   # manual fix of the single failing case (see docstring)
  else: raise Exception, "ERROR positions_to_frame: strand not recognized: '"+strand+"'"
  return frame

def are_overlapping_ranges( range1, range2): #[min, max], [min, max] ; return False or the new region containing both;
  #look into picture (lab notes) for case listing.
  #n refers to the picture
  #taking mininum and maximum of the 2 ranges################################################### ehiiii
  if range1[0]<range1[1]:   r1_0, r1_1 =range1[0], range1[1]
  else:                     r1_1, r1_0 =range1[0], range1[1]
  if range2[0]<range2[1]:   r2_0, r2_1 =range2[0], range2[1]
  else:                     r2_1, r2_0 =range2[0], range2[1]
  ################################################
  if r1_1<r2_0 or r2_1 < r1_0:   return False          # 2 or 3  (disjoint)
  elif r2_0<r1_0:
    if r2_1 < r1_1:  return [ r2_0, r1_1 ]             # 4  (partial overlap, range2 first)
    else:            return [ r2_0, r2_1 ]             # 6  (range2 contains range1)
  elif r2_1 < r1_1 : return [ r1_0, r1_1 ]             # 5  (range1 contains range2)
  else:              return [ r1_0, r2_1 ]             # 1  (partial overlap, range1 first)

# Regex metacharacters that must be escaped for gawk patterns.
to_comment=['^', '$', '.', '[', ']', '|', '(', ')', '*', '+', '?', "\\" ]
def comment_for_gawk(string):
  # comment characthers so that the string can be found with gawk
  out=''
  for char in string:
    if char in to_comment: out+='\\'
    out+=char
  return out

def write_to_file(string, filename):
  # Writes string (plus a trailing newline, via print) to filename.
  filett =open(filename, 'w')
  print >> filett, string
  filett.close()

def del_white(string):
  """str-> str with only one white char between words """
  # Pads the input with sentinel spaces, collapses runs of spaces (the guard
  # len(string)!=c+1 deliberately leaves a double space at the very end), then
  # strips one leading and two trailing characters.
  # NOTE(review): for inputs that do NOT end in whitespace, string[1:-2] drops
  # the last real character (e.g. del_white('a b') -> 'a '); all call sites in
  # this module pass strings ending in a space or newline, which masks this.
  # A robust equivalent would be ' '.join(string.split()) — verify before changing.
  string=' '+string+' '
  c=1
  while len(string)!=c:
    while string[c-1]==' ' and string[c]==' ' and len(string)!=c+1:
      string=string[:c-1]+string[c:]
    c=c+1
  return string[1:-2]

def configuration_file(filename, just_these_keys={}):
  """ Utility to read configuration files and return an opt with the parsed information, having as keys the option names, and reporting the values. These are automatically converted to the "minimal" type. If the value is an integere, it is cast to integer, then float is tried, otherwise string.
  It the argument just_these_keys is provided, only a subset of keys are reported, those present as keys of the hash just_these_keys.
  Example of configuration file format:
    temp= /tmp/
    profiles_folder = /users/rg/mmariotti/profiles
    keep_blast=1
    # commented lines like this one are not read. empty lines are also not read.
    blast_options.DEFAULT = -b 5000 -F F
    blast_options.DEEP = -b 10000 -F F
  # For dotted keys (what is before the "="), hashes are returned.
  In the two last lines, a hash is created as value of the output opt, corresponding to the key "blast_options". This nested hash will have two keys: DEFAULT and DEEP, and the corresponding values will be the strings reported in the config file.
    exonerate_options.example.DEEP = prova
  # when multiple dots are present in the key, a more complex nested structure of hashes is created. If the configuration file had only the line above, the reported opt would be:  {'exonerate_options':{'example':{'DEEP':'prova'}} }  """
  opt={}
  for line in open(filename, 'r'):
    try:
      if line.split() and line[0]!='#':
        corrected_line=line
        if corrected_line[-1]=='\n': corrected_line=corrected_line[:-1]
        # everything after the first '=' is the value ('=' inside values is kept)
        value = join(corrected_line.split('=')[1:], '=')
        key=del_white(line.split('=')[0])
        if '.' in key and not just_these_keys or just_these_keys.has_key(key.split('.')[0]):
          # dotted key -> build a nested hash structure
          main_key=key.split('.')[0]
          secondary_keys=join(key.split('.')[1:], '.')
          if not opt.has_key(main_key): opt[main_key]={}
          current_targeted_hash=opt[main_key]
          while '.' in secondary_keys: # key can be e.g. set.firstfield.secondfield = 3 in this case a key 'set' is created in opt, its value is a new hash. Then a key in this has is added (firstfield) its value being an empty hash, then a key (secondfield) is added to this hash and the value 3 is added to it.
            secondary_key=secondary_keys.split('.')[0];  secondary_keys=join(secondary_keys.split('.')[1:], '.')
            if not current_targeted_hash.has_key(secondary_key): current_targeted_hash[secondary_key]={}
            current_targeted_hash=current_targeted_hash[secondary_key]
          secondary_key=secondary_keys #now no . is left
          if not current_targeted_hash.has_key(secondary_key): current_targeted_hash[secondary_key]={}
          current_targeted_hash[secondary_key]=option_value( del_white(value) )
        if not just_these_keys or just_these_keys.has_key(key):
          opt[key] = option_value( del_white(value) )
    except Exception, e:
      print "ERROR reading configuration file " +filename+ " reading line: "+line[:-1]
      raise
  return opt

def no_gap(seq):
  """return the seq without gaps """
  out=''
  for char in seq:
    if char not in ['-', '.']: out+=char
  return out
# Alias kept for backward compatibility.
def nogap(seq): return no_gap(seq)

def alignment_relative_pos(group_align, global_align, neutral_chars='UX*'):
  """given two alignment class objects or two dictionaries {prot:sequence} like an alignment.diz, this function maps each position of the first alignment into a position of the second one. it returns a list long as the sequence(s) in first alignment, where the i-th element is the position of i-th aminoacid in the second alignment
  NB each protein in group_align should be present in global_align;
  NB2: for each position, there should be at least one protein which does not contain '-' in that position.
""" def find_prot_no_gap(prot_diz, pos): #returns the name of a protein in the alignment which has not a gap in position pos for prot in prot_diz: if prot_diz[prot][pos]!='-': return prot if type(group_align)==dict: diz=group_align lengh=len(diz.values()[0]) else: diz=group_align.diz lengh=group_align.length() if type(global_align)==dict: glodiz=global_align else: glodiz=global_align.diz output=range(lengh) for i in output: output[i]=-1 prot=diz.keys()[0] for pos in range(lengh): if diz[prot][pos]=='-': prot=find_prot_no_gap(diz, pos) aa=diz[prot][pos] pos_global=output[pos-1]+1 while output[pos]==-1: aa_global=glodiz[prot][pos_global] if aa_global!='-': if lower(aa_global)==lower(aa) or (aa_global in neutral_chars) or (aa in neutral_chars): output[pos]=pos_global # print aa else: print 'alignment_relative_pos ERROR AminoAcids dont correspond:\n>GROUP: '+prot+'\n'+no_gap(diz[prot])+'\n>GLOBAL: '+prot+'\n'+no_gap(glodiz[prot]) return False else: pos_global+=1 return output def mapping_alignments(group_align, global_align, neutral_chars='UX*'): """given two alignment class objects or two dictionaries {prot:sequence} like an alignment.diz, this function maps each position of the first alignment into a position of the second one. it returns a list long as the sequence(s) in first alignment, where the i-th element is the position of i-th aminoacid in the second alignment NB2: for each position, there should be at least one protein which does not contain '-' in that position. 
""" def find_prot_no_gap(prot_diz, pos, allowed_names={}): #returns the name of a protein in the alignment which has not a gap in position pos for prot in prot_diz: if (not allowed_names or allowed_names.has_key(prot)) and prot_diz[prot][pos]!='-': return prot if type(group_align)==dict: diz=group_align lengh=len(diz.values()[0]) else: diz=group_align.diz lengh=group_align.length() if type(global_align)==dict: glodiz=global_align else: glodiz=global_align.diz output=range(lengh) for i in output: output[i]=-1 prot=diz.keys()[0] for pos in range(lengh): impossibile_position=False #printerr(str(pos)) if diz[prot][pos]=='-' or not glodiz.has_key(prot): prot=find_prot_no_gap(diz, pos, glodiz) if not prot: #in this case, this is a position that exist only in group_align, since all common proteins have a gap in group_align in this position. So we put the same number computed in the previuos position, or the automata will stop, and we correct just before outputing putting -1 in these positions #print "alignment_relative_pos ERROR no common proteins between the alignments" output[pos]=output[pos-1] impossibile_position=True prot=diz.keys()[0] if not impossibile_position: aa=diz[prot][pos] pos_global=output[pos-1]+1 while output[pos]==-1: aa_global=glodiz[prot][pos_global] if aa_global!='-': if lower(aa_global)==lower(aa) or (aa_global in neutral_chars) or (aa in neutral_chars): output[pos]=pos_global # print aa else: print 'alignment_relative_pos ERROR AminoAcids dont correspond:\n>GROUP: '+prot+'\n'+no_gap(diz[prot])+'\n>GLOBAL: '+prot+'\n'+no_gap(glodiz[prot]) return False else: pos_global+=1 #correcting impossibile positions for p in range(1, len(output)): if output[p]==output[p-1]: output[p]=-1 return output def transfer_alignment(group_align_or_diz, global_align_or_diz, neutral_chars='UX*', dont_shrink=False): """given two alignment class objects or two dictionaries {prot:sequence} like an alignment.diz, this function return a new alignment including all sequences of 
  both alignments. you need to provide more common sequences as possible.  """
  def find_prot_no_gap(names_diz, pos, prot_diz): #returns the name of a protein in the alignment which has not a gap in position pos
    for prot in names_diz:
      if prot_diz[prot][pos]!='-': return prot
    return False
  ##############
  ##### to deal with both alignment() objects or simple dictionaries {protname:seq} ####
  diz={}
  if type(group_align_or_diz)==dict:
    for k in group_align_or_diz:  diz[k]=group_align_or_diz[k]
    lengh=len(diz.values()[0])
  else: #alignment class
    for k in group_align_or_diz.diz:  diz[k]=group_align_or_diz.diz[k]
    lengh=group_align_or_diz.length()
  glodiz={}
  if type(global_align_or_diz)==dict:
    for k in global_align_or_diz:  glodiz[k]=global_align_or_diz[k]
  else:
    for k in global_align_or_diz.diz:  glodiz[k]=global_align_or_diz.diz[k]
  #############
  diz_common={} #contains the names of the common prot between group_diz and global
  # NOTE(review): diz is mutated (del/add) while being iterated; this works in
  # Python 2 only because the size stays constant — fragile, do not port as-is.
  for prot in diz:
    c=complete_word(prot, glodiz) #word, or -1, or False
    if c and c!=-1:
      diz_common[c]= True
      if prot!=c:
        diz[c] = diz[prot]
        del diz[prot]
  if len(diz_common)==0:
    outt='transfer_aligment ERROR: no common proteins between the two alignments. check the headers!\n>global:\n'
    for k in glodiz: outt+=k+'\t'
    outt+="\n>group:\n"
    for k in diz:    outt+=k+'\t'
    raise Exception( outt )
  # print diz_common
  # output[pos] = column of the global alignment matched by group column pos
  output=range(lengh)
  for i in output: output[i]=-1
  prot=diz_common.keys()[0]
  pos=0
  while pos <lengh:
    #print 'pos: '+str(pos)
    if diz[prot][pos]=='-':
      prot=find_prot_no_gap(diz_common, pos, diz)
      # print prot
    if not prot: #none of the common proteins has something different from a gap in position: pos.
      # output.append(-1)
      # lengh+=1
      pos_global=output[pos-1]+1 #becomes 0 if pos is 0, for construction
      output[pos] = pos_global
      for p_name in glodiz:
        glodiz[p_name]= glodiz[p_name][:pos_global]+'-'+glodiz[p_name][pos_global:]
      prot=diz_common.keys()[0]
      #increment iwht a gap each global seq. set output[pos] = output[pos-1]+1
    else:
      aa=diz[prot][pos]
      pos_global=output[pos-1]+1 #becomes 0 if pos is 0, for construction
      while output[pos]==-1:
        aa_global=glodiz[prot][pos_global]
        if aa_global!='-':
          if lower(aa_global)==lower(aa) or (aa_global in neutral_chars) or (aa in neutral_chars):
            output[pos]=pos_global
          else:
            print 'transfer_alignment ERROR AminoAcids dont correspond in pos '+str(pos)+'('+aa_global+' != '+aa+') for prot:'+prot
            print '>cluster'
            print diz[prot]
            print '>global'
            print glodiz[prot]
            return
        else: pos_global+=1
    pos+=1
  #over:
  for p_name in diz: # ->up
    seq=''
    for pos in range(len(output)):
      if pos==0:
        for i in range(output[0]): #adding initial gaps
          seq+='-'
      else:
        for i in range(output[pos]-output[pos-1]-1):  seq+='-'
      seq+=diz[p_name][pos]
    for i in range(len(glodiz[glodiz.keys()[0]])-1 -output[-1]): #adding final gaps
      seq+='-'
    if diz_common.has_key(p_name): #just checking. if everything is ok, I should bring this IF ->up; if common_diz.has_key(p_name): don't do
      'nothing'
      # if seq!=glodiz[p_name]:
      #   print "transfer_alignment ERROR: with "+p_name+'; seq_group = '+seq +' seq_glodiz = '+glodiz[p_name]
    else:
      glodiz[p_name]=seq
  #correcting desert columns defect:
  # NOTE(review): glodiz is built above as a plain dict, which has no .shrink();
  # confirm whether the original returns/operates on an alignment object here.
  if not dont_shrink:  glodiz.shrink()
  return glodiz

class simmetrical_hash(dict):
  """ ........... old and bad implementation; see symmetrical_dict instead """
  def get(self, k1, k2):
    try:     return self[k1][k2]
    except:  return self[k2][k1]
# Alias kept for backward compatibility.
symmetrical_hash=simmetrical_hash

class symmetrical_dict(dict):
  """ Symmetrical dictionary. Usage:
  h=symmetrical_dict()
  h['a']['b']= 'something'   # this and h['b']['a']='something' have the same effect
  print h['a']['b']  --> 'something'
  print h['b']['a']  --> 'something'
  print h['a']['x']  --> None    # NOTE THIS!
  h.has_keys('a', 'c') -> False  """
  # Implementation: values are stored only under the pair's sorted key order;
  # subdict redirects accesses for keys that sort before its own mainkey.
  #def get_value(self, a, b): a, b=sorted([a, b]); return self[a][b] if a in self and b in self[a] else None
  # def set_value(self, a, b, value):
  #   a, b=sorted([a, b]);
  #   if not a in self: self[a]=self.subdict(parent=self, mainkey=a)
  #   self[a][b]=value
  def __getitem__(self, key):
    # auto-vivify a subdict for any first-level key
    if not key in self: self[key]=self.subdict(parent=self, mainkey=key)
    return dict.__getitem__(self, key)
  class subdict(dict):
    # Each subdict remembers its parent and its own first-level key; the
    # reserved entries '__parent__' / '__mainkey__' live in the dict itself.
    def __init__(self, parent, mainkey, *args, **kargs):
      dict.__init__(self)
      dict.__setitem__(self, '__parent__', parent )
      dict.__setitem__(self, '__mainkey__', mainkey)
    def __getitem__(self, key):
      if key < dict.__getitem__( self, '__mainkey__' ):
        return dict.__getitem__(  #parent dict--> get the right subdict
          # index it with the mainkey of this subdict
          dict.__getitem__( dict.__getitem__(self, '__parent__'), key),  dict.__getitem__(self, '__mainkey__') )
      else:
        return dict.__getitem__(self, key) if key in self else None
    def __setitem__(self, key, value):
      if key=='__parent__' or key=='__mainkey__':  dict.__setitem__(self, key, value)
      elif key < dict.__getitem__(self, '__mainkey__'):
        dict.__getitem__(self, '__parent__')[key][dict.__getitem__(self, '__mainkey__')] = value
      else: dict.__setitem__(self, key, value)
  def has_keys(self, a, b):
    # order-independent membership test for the pair (a, b)
    if b<a: a,b=b,a
    return dict.has_key(self, a) and b in self[a]
  def all_keys(self):
    # union of first-level keys and second-level keys (minus reserved entries)
    out=set(dict.keys(self))
    for x in dict.keys(self):
      for x in dict.keys( dict.__getitem__(self, x) ):
        if x!='__mainkey__' and x!='__parent__': out.add(x)
    return list(out)

class AliError(Exception):
  # Exception type raised by the alignment class (see seq_of).
  def __init__(self, value):  self.value = value
  def __str__(self):          return repr(self.value)

class alignment:
  """objects: diz= {protname: seq_with_gaps}  order=[protname1, protname2, ...protnameN]
  OLD: [ ss= {protname: ss_with_gap}  consensus= sequence long as the alignment (type =string) . see fill_consensus()  consensus_ss= secondary_structure long as the alignment (type =string) .
  see fill_consensus() ]
  methods:
  init: alignment(alignment_file_path, format='fasta', short_titles=False) --->loads each prot seq in self.diz ={protname:seq}
  titles(): return a (ordered) list of the protnames of the alignment
  seq_of(protname): return the sequence of an entry in the alignment. The fill_title function is eventually activated for this
  fill_title(title): given a uncomplete title (as cut by some programs), it return the complete title instead, if a unique title was found.
  add(protname, seq): adds protname:seq to self.diz; it's considered the last in order
  remove(protname): remove chosen protein from alignment. after that, remove_useless_gaps() is run
  remove_useless_gaps(): if all sequences have a '-' in a certain position, it's removed from all sequences
  nseq(): returns number of seq in alignment
  length(): checks if all sequences have same length, and returns it.
  display(): print alignment in fasta simple format
  fasta(): returns a string with a fasta text of alignment, removing gaps in sequences
  check_conservation(pos, lett): returns the conservation percentage of LETTER in global POSition in the alignment.
  conservation(pos, permitted_lett=AA_LETT+'-', threshold=0.0): returns a dictionary like {letter: conservation} at POS, if conservation>threshold
  conservation_map(thresholdC=0.0): returns a list of dictionary like {letter: conservation}, one per position
  check_conservation_ss(pos, lett): same as check_conservation but for secondary structure
  conservation_ss( pos, permitted_lett=SS_LETT+'-', threshold=0.0) : same as conservation but for secondary structure
  fill_ss(xff_folder, suffix=''): fill the ss object with secondary structures read from files in xff_folder es (using _h as suffix): ../xff_folder/vdac_drome_h.xff
  fill_consensus(): calculate a consensus for sequences taking the most present aa in each position, and put it in self.consensus. The same is done for ss (if present); consensus for ss is put in self.consensus_ss
  transfer_consensus(xff_folder='xff', suffix='', outfold='consensus'): it extracts secondary structure information from files (with chosen suffix) in xff_folder, calculates the consensus ss and trasfer it to each protein: a xff file per protein is opened and written in the output folder (outfold)
  order_by_similarity_with(title, matrix): it returns a list of protnames, which are ordered by similarity with the protein named "title", which must be present in the alignment. This protein is not returned in the list.
  all_positions_of(aa): it returns a list of positions (starting from 0) corresponding to occurences of the aminoacid specified. This can occur in any of the seqs of the alignment
  identity_matrix(): it returns a list of n elements (where n is the length of the alignment) of 0 or 1-> if all sequences have the same aminoacid at this position
  sequence_identity(): it returns a float from 0 to 1, counting all perfect match positions / the lenght of the alignment . Makes sense for two seqs alignments
  order_with(o_function): the o_function must be a cmp like function taking as input two [title, seq] objects. The self alignment is ordered accordingly
  NEED UPDATE! """
  def __init__(self, alignfile='', format='fasta', short_titles=False, check_uniq_titles=False):
    # alignfile may be a path (loaded according to `format`) or directly a
    # {title: seq} dict; reset_derived_data is defined elsewhere in the class.
    self.reset_derived_data()
    self.order=[]
    self.diz={}
    if type(alignfile)==dict: #you can initialise the alignment object even using directly the diz object. In this case, random order is assigned
      for k in alignfile:
        self.diz[k]=alignfile[k]
        self.order.append(del_white(k))
      # self.display()
    elif alignfile!='':
      self.load_file(alignfile, format=format, short_titles=short_titles, check_uniq_titles=check_uniq_titles)
    self.ss={}
    self.length() # check if all sequences have same length
  def titles(self):
    # returns a copy of the ordered title list
    return list(self.order)
  def load_file(self, alignfile, format='fasta', short_titles=False, check_uniq_titles=False):
    """ loading a fasta or clustalw format alignment"""
    if format=='fasta':
      if check_uniq_titles: all_short_titles={}
      for title,seq in parse_fasta(alignfile):
        if short_titles: title=title.split()[0]
        if check_uniq_titles:
          if all_short_titles.has_key(title.split()[0]): raise Exception, "alignment-> load ERROR short title in the alignment is not uniq: "+title.split()[0]
          all_short_titles[title.split()[0]]=1
        if self.diz.has_key(title):  printerr('alignment class WARNING: the title "'+title+'" is present in more than one sequence. only the last one will be stored in the alignment object.', 1 )
        else:
          self.order.append(title)
          self.diz[title]=seq
    elif format=='stockholm':  self.load_stockholm(alignfile)
    else: #clustal
      for [title, seq] in getfastaaln(open(alignfile, 'r'), order=1):
        if short_titles: title=title.split()[0]
        if self.diz.has_key(title):  printerr('alignment class WARNING: the title "'+title+'" is present in more than one sequence. only the last one will be caught in the alignment object.' )
        else:
          self.order.append(title)
          self.diz[title]=seq
  def load_stockholm(self, filename):
    """Loads a stockholm file into the alignment. ss is stored into attribute .ss if present, same thing for .rf """
    # Parses block-structured Stockholm: first block via add(), later blocks
    # appended via set_sequence(); '#=GC SS_cons' / '#=GC RF' lines accumulate
    # into .ss / .rf. Requires the terminating '//' line.
    self.order=[];  self.diz={};  self.ss='';  self.rf=''
    fileh=open(filename, 'r')
    line=fileh.readline()
    while line and (line[0]=='#' or not line.split()):  line=fileh.readline()
    #now in first alignment line
    while line and line[0]!='#' and line!='//\n' and line.split():
      self.add(line.split()[0], line.split()[1]) #parsing first block
      line=fileh.readline()
    while line and (line[0]=='#' or not line.split()):
      if line.startswith('#=GC SS_cons'):  self.ss=line.split()[-1]
      if line.startswith('#=GC RF'):       self.rf=line.split()[-1]
      line=fileh.readline()
    #now in beginning of new block, or last line
    while line and line!='//\n':
      if   line.startswith('#=GC SS_cons'):  self.ss+=line.split()[-1]
      elif line.startswith('#=GC RF'):       self.rf+=line.split()[-1]
      elif line.split():
        if self.has_title( line.split()[0] ):  self.set_sequence( line.split()[0],  self.seq_of(line.split()[0])+ line.split()[1] )
        else:                                  self.add( line.split()[0], line.split()[1] )
      line=fileh.readline()
    fileh.close()
    if line!='//\n': raise Exception, "ERROR loading stockholm file "+filename+' : \\ was not found at the end of file'
    self.convert_sequences(replace, '.', '-')
  def load_cmalign_out(self, filename):
    """ load an alignment from a cmalign out. this is almost identical to a stockholm format, but with 1 line which disrupts it (I think it's a bug); it also set the cm_name attribute to the name of cm found in cmalignout file"""
    # First try plain Stockholm; on failure, strip the offending header lines
    # into a temp file and re-parse.
    try:
      self.load_stockholm(filename)
      return
    except: pass
    fileh=open(filename);  tempfileh=open(temp_folder+'temp_cmalign_out.stk', 'w')
    line=fileh.readline()
    while line and not line.startswith('# cm name'): line=fileh.readline()
    line=fileh.readline(); line=fileh.readline()
    self.cm_name=line.split()[1]
    while line and not line.startswith('# seq idx'): line=fileh.readline()
    line=fileh.readline(); line=fileh.readline(); line=fileh.readline() #skipping line which makes the stockholm parser crash
    while line:
      print >> tempfileh, line,
      line=fileh.readline()
    tempfileh.close()
    self.load_stockholm(temp_folder+'temp_cmalign_out.stk')
  def __repr__(self): return self.summary()
  def subset(self, titles):
    """ Return an alignment containing only certain sequences, defined by argument titles (hash or list, hash is faster), which must contain the complete titles or the first word. Order in output is preserved from self alignment """
    b=alignment()
    for title in self.titles():
      if title in titles or title.split()[0] in titles:  b.add(title, self.seq_of(title))
    return b
  def fill_title(self, uncomplete_title, silent=False):
    """given a title, returns the title of the alignment from where this title where cut. If it is not unique, or none is matching, returns False """
    if uncomplete_title in self.diz: return uncomplete_title
    title_that_match=complete_word(uncomplete_title, self.diz, word_match=True)
    #print title_that_match
    if not title_that_match:
      if len(uncomplete_title.split())>1:  return self.fill_title( uncomplete_title.split()[0], silent )
      else:
        if not silent: raise Exception, "ERROR the aligment object have no sequence starting with the title: "+uncomplete_title
        return False
    elif title_that_match==-1:   # -1 from complete_word means: ambiguous match
      if len(uncomplete_title.split())>1:  return self.fill_title( uncomplete_title.split()[0], silent )
      else:
        if not silent: raise Exception, "ERROR the aligment object have no unique sequence starting with the title: "+uncomplete_title
        return False
    return title_that_match
  def seq_of(self, title):
    # Accepts a full title, an integer index, or a partial title (resolved
    # through fill_title). Raises AliError when nothing matches.
    if self.diz.has_key(title):  return self.diz[title]
    elif type(title)==int:
      if title>=len(self.titles()):
        raise AliError("seq_of function ERROR the alignment object have no sequence with the id: "+str(title) )
        return False   # NOTE(review): unreachable after raise
      return self.seq_of(self.titles()[title])
    else: #trying to recover the seq of a uncomplete title. returning it only if it's unique
      complete_title= self.fill_title(title, True)
      if complete_title: return self.diz[complete_title]
      #trying to recover from only the first word
      complete_title= self.fill_title(title.split()[0]+' ', True)
      if complete_title: return self.diz[complete_title]
      raise AliError("seq_of function ERROR the alignment object have no sequence with the title: "+title)
      return False   # NOTE(review): unreachable after raise
  def has_title(self, title, even_partial=False):
    if even_partial:  return bool(self.fill_title(title, silent=True))
    else:             return self.diz.has_key(title)
  def codeml_format(self):
    # PAML/codeml-style text: header with counts, then '>shortname' + sequence.
    o=' '+str(len(self.titles()))+' '+str(self.length())+'\n'
    for title in self.titles():
      short_title= title.split()[0]
      o+= '>'+short_title+'\n'+self.seq_of(title) +'\n'
    return o[:-1]
  # NOTE(review): phylip_format is truncated at the end of this chunk (the
  # raise-message string continues beyond); tail reproduced verbatim.
  def phylip_format(self, chars_per_block=60):
    if not self.check_length(): raise Exception, "Sequences are not aligned!
can't output phylip" short_titles= [ title.split()[0] for title in self.titles() ] max_short_title_length= max( [ len(s) for s in short_titles ] ) o=' '+str(len(self.titles()))+' '+str(self.length())+'\n' for c in range( (self.length()-1)/chars_per_block +1): #number of blocks for index, title in enumerate( self.titles() ): if c==0: title_shown= short_titles[index].ljust(max_short_title_length)+' ' else: title_shown=''# ' '*max_short_title_length + ' ' o+=title_shown + self.seq_of(index)[ c*chars_per_block: (c+1)*chars_per_block ]+'\n' o+='\n' return o def clustal_format(self, ali_chars=60): if not self.nseq(): return '' h={} for t in self.titles(): if h.has_key( t.split()[0] ): raise Exception, "ERROR clustalw_format: the first word of titles must be unique to call this function" h[t.split()[0]]=1 max_length= max ( [len(t) for t in h] ) n_lines = self.length() / ali_chars out='CLUSTAL W (x.xx) multiple sequence alignment\n' if self.length() % ali_chars: n_lines+=1 for line_index in range(n_lines): for t in self.titles(): out+= t.split()[0].ljust(max_length)+' ' + self.seq_of(t) [ line_index*ali_chars : (line_index+1)*ali_chars ] + '\n' if line_index != n_lines-1: out+='\n\n' return out def fill_sides(self, source_ali, inplace=False, wild_chars=[]): """ This function is useful when you have an alignment coming for example from a genomic prediction program, which aligns only a portion of the query. This function takes the full seq of the query from source_ali, finds the missing parts and add them. It also replaces * in the self query with U. If the sequences do not match, if specified, attempt to match them considering wild_chars. The sequence returned will have the sequence matching perfectly the one from source ali. NOTE! If the sequence in self contains characters considered special by the re module, the behavior of this function is unpredictable. 
""" if self.nseq()!=2: raise Exception, 'alignment->fill_sides ERROR this function can be applied only to alignments with 2 sequences' if inplace: a=self; self.reset_derived_data() else: a=self.copy() common_titles={} #complete_title: title ; they can be different, in that case title is the one in the self while complete_title is the one in the source_ali for title in a.titles(): complete_title= source_ali.fill_title(title, silent=True) #is false if it doesn't have it if complete_title: common_titles[complete_title]=title if len(common_titles)!=1: raise Exception, 'alignment->fill_sides ERROR too few or too many common titles between the two alignment provided (must be 1, it is: '+str(len(common_titles))+')' title_source_ali=common_titles.keys()[0]; title_self=common_titles[title_source_ali] complete_query_seq= nogap(source_ali.seq_of(title_source_ali)) partial_aligned_query_seq= a.seq_of(title_self) pos_start= complete_query_seq.find(nogap(partial_aligned_query_seq)) #0 BASED #wild chars match if pos_start==-1 and wild_chars: pattern=re.compile('('+replace_chars(nogap(partial_aligned_query_seq), wild_chars, '.')+')') match=pattern.search(complete_query_seq) if match: pos_start= match.start() s=match.groups() seq=s[0] #adding gaps to seq find_gap= partial_aligned_query_seq.find('-') while find_gap!=-1: seq=seq[:find_gap]+'-'+seq[find_gap:] find_gap= partial_aligned_query_seq.find('-', find_gap+1) partial_aligned_query_seq=seq a.set_sequence(title_self, partial_aligned_query_seq) ### NOTE the following code block is almost surely useless, but I keep it just for security. I now resolve the wild char match using the re module above. if pos_start==-1 and wild_chars: # here trying to match considering some possible wild chars. 
In these positions the aminoacid is inferred from the complete_sequence in source_ali wild_char='%' wild_char_pos= partial_aligned_query_seq.find(wild_char) ; next_wild_char_pos=partial_aligned_query_seq.find(wild_char, wild_char_pos+1) while wild_char_pos!=-1: bit_before_wild_char= partial_aligned_query_seq[:wild_char_pos] bit_before_wild_char_pos=complete_query_seq.find(nogap(bit_before_wild_char)) while bit_before_wild_char_pos!=-1: if next_wild_char_pos==-1: seq_to_match_in_partial_query_seq = nogap( partial_aligned_query_seq [ wild_char_pos+1:] ) else: seq_to_match_in_partial_query_seq = nogap( partial_aligned_query_seq [ wild_char_pos+1: next_wild_char_pos ] ) seq1= complete_query_seq[bit_before_wild_char_pos+1+len(nogap(bit_before_wild_char)):] if seq1.startswith(seq_to_match_in_partial_query_seq): # checking that the match found by the sequence just before the wild char is the correct one. to know this, I check the characters right after the wild char in the complete sequence: they must correspond with the sequence in the partial sequence after the wild char. 
aligned_to_wild_char= complete_query_seq[bit_before_wild_char_pos+len(nogap(bit_before_wild_char))] partial_aligned_query_seq= partial_aligned_query_seq[:wild_char_pos]+ aligned_to_wild_char +partial_aligned_query_seq[wild_char_pos+1:] bit_before_wild_char_pos=complete_query_seq.find(nogap(bit_before_wild_char), bit_before_wild_char_pos+1) wild_char_pos= partial_aligned_query_seq.find(wild_char, wild_char_pos+1) a.set_sequence(title_self, partial_aligned_query_seq) pos_start= complete_query_seq.find(nogap(partial_aligned_query_seq)) #0 BASED if pos_start==-1: #rechecking if wild chars mode, or just checking to raise if no wild_chars raise Exception, 'alignment->fill_sides ERROR sequences don\'t match for title: '+title_self+' -> '+nogap(a.seq_of(title_self))+" can't be found in "+complete_query_seq pos_end = pos_start+len(nogap(a.seq_of(title_self)))-1 a.diz[title_self]= complete_query_seq[:pos_start] + a.seq_of(title_self) + complete_query_seq[pos_end+1:] for t in a.titles(): if t!=title_self: a.diz[t]='-'*pos_start+a.seq_of(t)+'-'*len(complete_query_seq[pos_end+1:]) if not inplace: return a def remove(self, protname, dontremoveuselessgaps=False): del self.diz[protname] if type(self.ss) == dict and self.ss.has_key(protname): del self.ss[protname] self.reset_derived_data() for i in range(len(self.order)): if self.order[i]==protname: self.order.pop(i) break if not dontremoveuselessgaps: self.remove_useless_gaps() return i def change_title(self, title, new_title): """Change the title of a sequence in the alignment to new_title. 
the order of the sequence in the alignmetn is preserved """ try: index= self.titles().index(title) except: raise Exception, "alignment->change_title() ERROR the title provided is not found among the titles of this alignment: "+title seq= self.seq_of(title) self.remove(title, dontremoveuselessgaps=True) self.add(new_title, seq, index) def is_gap_column(self, pos, char='-'): """ Return true is all sequence have a gap (or another character in input) at position pos (0-based!)""" for title in self.titles(): if self.seq_of(title)[pos]!=char: return False return True def stockholm(self, block_length=150): """ stockholm output for alignments. if a .ss attribute is defined, a #=GC line with SS_cons will also be included. Same thing for .rf attribute """ stockholm_out='# STOCKHOLM 1.0 \n\n' max_length_title=max([len(t.split()[0]) for t in self.titles()]+[12] ) #12 is for length of #GC tag print_ss=False; print_rf=False try: assert self.ss; assert len(self.ss)==self.length(); print_ss=True except: pass try: assert self.rf; assert len(self.rf)==self.length(); print_rf=True except: pass for i_block in range( 1+(self.length()-1)/block_length ): for title in self.titles(): stockholm_out+=(title.split()[0]).ljust(max_length_title)+' '+replace(self.seq_of(title)[i_block*block_length:(i_block+1)*block_length], '-', '.')+'\n' if print_ss: stockholm_out+='#=GC SS_cons'.ljust(max_length_title)+' '+self.ss[i_block*block_length:(i_block+1)*block_length]+'\n' if print_rf: stockholm_out+='#=GC RF'.ljust(max_length_title)+' '+self.rf[i_block*block_length:(i_block+1)*block_length]+'\n' stockholm_out+='\n' stockholm_out+='//' return stockholm_out def remove_useless_gaps(self): """if all sequences have a '-' in a certain position, it's removed from all sequences """ if not self.diz: return columns_removed={} for pos in range( self.length() ): if self.is_gap_column(pos): columns_removed[pos]=1 #annotating: we have to remove this pos+=1 old_length=self.length() for title in self.titles(): 
old_seq=self.seq_of(title) new_seq='' for pos in range( old_length ): if not pos in columns_removed: new_seq += old_seq[pos] self.set_sequence(title, new_seq) if columns_removed: self.reset_derived_data() return sorted(columns_removed.keys()) remove_empty_columns=remove_useless_gaps def add(self, protname, seq, index=None): if self.diz.has_key(del_white(protname)): if seq == self.seq_of(del_white(protname)): printerr('alignment class WARNING: the title "'+protname+'" is already present in one sequence. Only one will be kept.\n') else: printerr('alignment class WARNING: the title "'+protname+'" is already present in one sequence and the sequences DIFFER! only the last one added will be kept.\n') self.set_sequence(protname, seq) else: self.diz[del_white(protname)]=seq if index is None: index=len(self.order) self.order.insert(index, del_white(protname)) self.reset_derived_data() def __nonzero__(self): return bool(self.order) def nseq(self): return len(self.diz) def length(self): if self: return len(self.seq_of(0)) else: return 0 def check_length(self): """ Parse the alignment and returns False if it is not true that all sequences have the same length, True if it is regular""" l=0 for title in self.titles(): if not l: l=len(self.seq_of(title)) elif l!=len(self.seq_of(title)): return False return True def translate(self): """For coding sequence alignments. 
Returns a copy of the alignment with the same titles and translated sequences """ a=alignment() for t in self.titles(): a.add(t, transl(self.seq_of(t))) return a def fill_ss(self, xff_folder, suffix=''): """fill the ss object with secondary structures read from files in xff_folder es (using _h as suffix): ../xff_folder/vdac_drome_h.xff """ for prot in self.diz: temp=getfastaxff(open(xff_folder+'/'+prot+suffix+'.xff', 'r').readlines(), 0) if len(temp)==1: sss=temp[temp.keys()[0]] else: sss=temp[prot] ss_cont=0 ss_to_add='' # print prot for pos in range(self.length()): a=self.diz[prot][pos] if a!='-': if sss[0][ss_cont]==a: ss_to_add+=sss[1][ss_cont] ss_cont+=1 else: ss_to_add+='-' self.ss[prot]=ss_to_add def display(self, to_file='', return_it=False, fasta=False): if to_file: newfile=open(to_file, 'w') for t in self.titles(): seq=self.seq_of(t) if fasta: seq=fasta(seq) print >> newfile, ">"+t+'\n'+seq newfile.close() else: out='' for t in self.titles(): seq=self.seq_of(t) if fasta: seq=fasta(seq) out+= '\n>'+t+'\n'+seq if out: out=out[1:] if return_it: return out else: print out def summary(self, titles=[]): out='' for prot_index in range(len(self.order)): if titles: prot=titles[prot_index] else: prot=self.order[prot_index] out+= '\n>'+prot+'\n'+self.diz[prot] if out: out=out[1:] return out def fasta(self, protein=''): out='' for prot in self.titles(): if protein=='' or prot==protein : out+='>'+prot+'\n'+no_gap(self.seq_of(prot))+'\n' return out def aligned_fasta(self): out='' for prot in self.titles(): out+='>'+prot+'\n'+self.seq_of(prot)+'\n' return out def check_conservation(self, pos, lett): """ alignment = list of sequences; returns the conservation percentage of LETTER in global POSition in the ALIGNMENT. From 0.0 to 1.0 DEPRECATED""" nmatch=0.0 for prot in self.diz: if self.diz[prot][pos]==lett: nmatch+=1 cons=float(nmatch/len(self.diz)) if len(str(cons))>5: #approximation to 0.nnn ???????? 
cons=float(str(cons)[:5]) return cons def conservation(self, pos, permitted_lett='', threshold=0.0, exclude={}): """ alignment = list of sequence; returns a dictionary like {letter: conservation} at POS, if conservation>=threshold; you can provide a hash of titles to be excluded. If permitted_lett=='', then all letters are permitted. Pos is 0 based!!!""" counts={}; titles_considered=0; out={} for title in self.titles(): if not exclude.has_key(title): aa=self.seq_of(title)[pos] counts[aa]=counts.setdefault(aa, 0)+1 titles_considered+=1 for k in counts: if not permitted_lett or k in permitted_lett: percent=counts[k]/float(titles_considered) if percent>= threshold: out[k]=percent return out def most_conserved(self, pos, permitted_lett=AA_LETT, threshold=0.0): """return a tuple (aa,conservation) . If only gaps are present at that position, ('', 0.0) is returned DEPRECATED """ cons_profile=self.conservation(pos, permitted_lett, threshold) max_percent, max_aa =0.0, '' for aa in cons_profile: if cons_profile[aa] > max_percent: max_percent=cons_profile[aa] max_aa=aa return (max_aa, max_percent) def check_conservation_ss(self, pos, lett): """ alignment = list of sequences; returns the conservation percentage of LETTER in global POSition in the ALIGNMENT. From 0.0 to 1.0 DEPRECATED""" nmatch=0.0 for prot in self.ss: if self.ss[prot][pos]==lett: nmatch+=1 cons=float(nmatch/len(self.ss)) if len(str(cons))>5: #approximation to 0.nnn ???????? 
cons=float(str(cons)[:5]) return cons def conservation_ss(self, pos, permitted_lett=SS_LETT+'-', threshold=0.0): """ alignment = list of sequence; returns a dictionary like {letter: conservation} at POS, if conservation>threshold DEPRECATED""" out={} for lett in permitted_lett: n=self.check_conservation_ss(pos, lett) if n>threshold: out[lett]=n return out def fill_consensus(self): """calculate a consensus for sequences taking the most present aa in each position, and put it in self.consensus if secondary structure information is present, this is done even for ss; consensus for ss is put in self.consensus_ss DEPRECATED """ def max_value(diz, exclude=[]): #returns the key with a higher value associated, excluding keys in exclude list max_v=None the_key=None for key in diz: if diz[key]>=max_v and not key in exclude: the_key=key max_v=diz[key] # if the_key==None: return '-' return the_key self.consensus='' for pos in range(self.length()): self.consensus+=max_value(self.conservation(pos), ['-']) if self.ss!={}: self.consensus_ss='' for pos in range(self.length()): self.consensus_ss+=max_value(self.conservation_ss(pos), ['-']) def position_in_seq(self, title, position): """ this function computes the position relative to a single sequence in the alignment, given the position in the alignment. NB !! All positions are 1 based here. Also, if you ask for a position which is a gap in the desired sequence, the position returned refers actually to the last non gap position in the seq (0 if non existing) """ seq=self.seq_of(title)[:position] n_gaps_until_position=0 for s in seq: if s=='-': n_gaps_until_position+=1 return position-n_gaps_until_position def position_in_ali(self, title, position): """ this function computes the position in the alignment in which the sequence "title" has position "position". it is like the contrary of position_in_seq. NB positions are 1 based here! 
""" seq=self.seq_of(title) pos_seq=0 for p, aa in enumerate(seq): if aa!='-': pos_seq+=1 if pos_seq==position: return p+1 def conservation_map(self, thresholdC=0.0, exclude={}, dont_save=False): """returns a list of dictionary like {letter: conservation}, one per position. Titles in list exclude will not taken into account """ exclude_key= join(sorted(exclude.keys()), '&') # to index self.conservation_map_data to keep the data if self.conservation_map_data is None: self.conservation_map_data={} if not self.conservation_map_data.has_key( exclude_key ): out=[] for pos in range(self.length()): out.append(self.conservation(pos, threshold=thresholdC, exclude=exclude)) if dont_save: return out self.conservation_map_data[exclude_key]=out return self.conservation_map_data[exclude_key] def conservation_quadratic_score(self, titles=None): """ Return a list of scores, one per position of the alignment, computed as the sum of proportion of any character found at that position (except for character -, which then are like scored negatively). Titles can be used to call the function on a subset of the alignment.""" exclude = {} if titles is None else dict.fromkeys( [t for t in self.titles() if not t in titles] ) c_map=self.conservation_map( exclude=exclude ) out=[] #one score per position for pos in range(self.length()): cons_dict_pos= c_map[pos] #pos 0 based score=0.0 for char in cons_dict_pos: if char!='-': score+= cons_dict_pos[char]**2 out.append(score) return out def reset_derived_data(self): """Delete the attributes derived from computation of the alignment, as for example .conservation_map_data. This function must be run everytime some modification to the self alignment is done. """ self.conservation_map_data=None def trim_columns(self, max_non_gaps=0.1, inplace=True, remove_empty_seqs=False): """Clean the alignment by removing the desert columns. 
Only the columns for which at least X sequences have something different than a gap are kept """ #getting list of positions to remove desert_columns= self.find_desert_columns(max_non_gaps=float(max_non_gaps), length_clusters=1) positions_to_remove={} for pos, lenght in desert_columns: for i in range(lenght): positions_to_remove[ pos+i ]=True ## now building new ali skipping those positions a=alignment() for title in self.titles(): seq='' for pos in range(self.length()): if not pos in positions_to_remove: seq+= self.seq_of(title)[pos] a.add(title, seq) if not remove_empty_seqs: a.remove_empty_seqs() a.reset_derived_data() if not inplace: return a else: self.__dict__=deepcopy(a.__dict__); return sorted(positions_to_remove.keys()) def consensus_sequence(self, threshold=0.0, sec_char='', exclude={}): """This function computes a consensus sequences taking into account all sequences in the alignment (apart from the titles in the input hash exclude). Not all the columns are taken into account: only those having at maximum \ "threshold" gaps (in proportion). If sec_char is set to a non-False value, any column containing a "U" will return a sec_char, regardless of number of gaps in this column. If a master alignment is provided, the conservation threshold is checked with this alignment instead of with self. This provided that the master alignment must have column numbering identical to self. """ seq='' conservation_map_data=self.conservation_map(exclude=exclude) for pos in range(self.length()): # looking at the conservation profile at this position. 
sorting the aminoacid (or gap character) according to their representation in this column sorted_keys=sorted(conservation_map_data[pos].keys(), key=conservation_map_data[pos].get, reverse=True) if sec_char and 'U' in sorted_keys: seq+=sec_char elif len(sorted_keys)==1 or sorted_keys[0]!='-': seq+=sorted_keys[0] elif conservation_map_data[pos].setdefault('-', 0.0) <= threshold and len(sorted_keys)>1: seq+=sorted_keys[1] else: seq+='-' return seq def transfer_consensus(self, xff_folder='xff', suffix='', outfold='consensus'): """it extracts secondary structure information from files (with chosen suffix) in xff_folder, calculates the consensus ss and trasfer it to each protein: a xff file per protein is opened and written in the output folder (outfold) """ self.fill_ss(xff_folder, suffix) self.fill_consensus() for prot in self.diz: # print prot seq=no_gap(self.diz[prot]) ss_cons='' rel_pos=alignment_relative_pos({prot:seq}, self.diz) for p in range(len(rel_pos)): ss_cons+=self.consensus_ss[rel_pos[p]] newfile=open(outfold+'/'+prot+suffix+'C.xff', 'w') print >>newfile, '>'+prot+'\n'+seq+'\n#'+ss_cons newfile.close() def order_by_similarity_with(self, title, matrix=''): """ returns an ordered list of protnames ordered by the similarity with the sequence named title, which must be in the alignment (this protname is not reported) a matrix can be specified for scoring, otherwise the identity matrix is used. matrix object in input must be an hash of hashes. e.g. 
identity matrix would be like {'aa1':{'aa1':1, 'aa2':0, 'aa3':0, ....}, 'aa2':{'aa1':0, 'aa2':1, 'aa3':0...}, ...} """ work_list=[] score_diz={} for protname in self.titles(): if protname!=title: work_list.append(protname) score_diz[protname]=0 for pos in range(self.length()): #aa1=self.diz[title][pos] #aa2=self.diz[protname][pos] if not self.diz[title][pos]=='-': if matrix: score_diz[protname] += matrix[ self.diz[title][pos] ][ self.diz[protname][pos] ] #YOU HAVE TO PROVIDE THE MATRIX AS AN HASH LIKE {'aa1':{'aa1':1, 'aa2':0, 'aa3':0, ....}, 'aa2':{'aa1':0, 'aa2':1, 'aa3':0...}, ...} else: #simulating identity matrix score_diz[protname] += int(self.diz[title][pos]==self.diz[protname][pos]) def compare_function(x, y): if score_diz[x]>score_diz[y]: return -1 elif score_diz[x]==score_diz[y]: return 0 else: return 1 # print score_diz work_list.sort(compare_function) # print work_list return work_list def sequence_identity_at_position(self, pos): """Return the simple sequence identity at position pos (0 based, ali based). The most conserved character at each position is considered, unless it is a gap. In that case, either the second char proportion is reported (if any is present), or 0.0 is returned.""" cmap_this_pos=self.conservation_map()[pos] sorted_chars=sorted ( cmap_this_pos.keys(), key=lambda x: cmap_this_pos[x] , reverse=True ) the_char=sorted_chars[0] if the_char == '-': if len(sorted_chars)==1: return 0.0 else: the_char=sorted_chars[1] return cmap_this_pos[the_char] def sequence_identity_list(self): """Return a list of same length of alignment, with values computed at each position by function sequence_identity_at_position""" return [self.sequence_identity_at_position(pos) for pos in range(self.length())] def sequence_identity(self, count_terminal_gaps=True): """ returns the seq identity of the alignment. 
if count_terminal_gaps=False, then terminal gaps of the sequence having the longest one are excluded from the count of the total length of the alignment""" if len(self.titles())==1: return 1.0 if len(self.titles())==0: return 0.0 n_matches=0 for pos in range(self.length()): aa=self.seq_of(self.titles()[0])[pos] index_title=1 this_pos_is_mismatch=False while index_title < len( self.titles() ) and not this_pos_is_mismatch: if self.seq_of(self.titles()[index_title])[pos]!=aa: this_pos_is_mismatch=True index_title+=1 if not this_pos_is_mismatch: n_matches+=1 length=self.length() if not count_terminal_gaps: n_boundaries, c_boundaries =[], [] for i in self.titles(): a=self.boundaries_of(i) n_boundaries.append(a[0]) c_boundaries.append(a[1]) nterminal_gap_length= max (n_boundaries ) cterminal_gap_length= self.length()-1 - min ( c_boundaries ) length-= (nterminal_gap_length+cterminal_gap_length) return float(n_matches)/length def boundaries_of(self, title): """returns the limits, starting from 0, of a seq in the alignment, meaning the first and last positions with something different than a gap """ met_the_start=False max_gap_pos=-1 for pos in range(self.length()): if self.seq_of(title)[pos]=='-': if not met_the_start: max_gap_pos=pos else: last_non_gap_pos=pos met_the_start=True return (max_gap_pos+1, last_non_gap_pos) boundaries=boundaries_of def all_positions_of(self, aa, except_in_seq=[], minimum_number=1): """it returns a list of positions (starting from 0) corresponding to occurences of the aminoacid specified. This can occur in any of the seqs of the alignment. 
""" out_list=[] for pos in range(self.length()): found=0 cont_prot=0 while found<minimum_number and cont_prot< self.nseq(): if self.seq_of( self.order[cont_prot] ) [pos] == aa and not (self.order[cont_prot] in except_in_seq) : found+=1 cont_prot+=1 if found>=minimum_number: out_list.append(pos) return out_list def identity_matrix(self): id_matrix=[] for i in range(self.length()): i_seq=0 all_the_same=1 aa=self.seq_of(self.titles()[0])[i] while i_seq < self.nseq() and all_the_same: if self.seq_of(self.titles()[i_seq])[i]!=aa: all_the_same=0 i_seq+=1 id_matrix.append(all_the_same) return id_matrix def local_mismatches(self, min_length=4, min_match_length=4, invert=0): """parse a 2 seq alignment and return the coordinates of local mismatches. These are defined as windows of minimal length min_length, surrounded by strecthes of 100% conservation of minimum length: min_match_length. indexes are starting with 0 !!!! DO NOT RELY ON THIS FUNCTION, REBUILD IT IF YOU HAVE TO USE IT. """ if self.nseq()!=2: raise Exception, 'cannot use this function (alignment.local_mismatches) on more or less than 2 sequences' out=[] id_matrix=self.identity_matrix() id_matrix_string='' for i in id_matrix: id_matrix_string+=str(i) i_string=0 start_mismatch='' gaps=0 while i_string+ min_match_length< len(id_matrix_string): #print id_matrix_string[i_string:i_string+ min_match_length+1] #print self.best_ali[0][i_string:i_string+ min_match_length+1] if self.seq_of(self.order[1])[i_string]=='-': gaps+=1 if (id_matrix_string[i_string:i_string+ min_match_length+1]=='1'*min_match_length+'0' and not invert) or (invert and id_matrix_string[i_string:i_string+ min_match_length+1]=='0'*min_match_length+'1'): start_mismatch=i_string+ min_match_length #cannot be 0 by definition, so I can test if start_mismatch # print "start found" if (id_matrix_string[i_string:i_string+ min_match_length+1]=='0'+('1'*min_match_length) and not invert) or ( invert and id_matrix_string[i_string:i_string+ 
min_match_length+1]=='1'+('0'*min_match_length)): # print "stop found" stop_mismatch=i_string if start_mismatch and stop_mismatch-start_mismatch +1 >= min_length: stop_mismatch-=gaps start_mismatch-=gaps out.append( (start_mismatch,stop_mismatch) ) ###if negative frame, you can have: [(564, 507)] i_string+=1 return out def all_mismatches(self, count_gaps=True): """parse a 2 seq alignment and return the coordinates of local mismatches in a list (first position is 0) """ out=[] if self.nseq()!=2: raise Exception, 'cannot use this function (alignment.local_mismatches) on more or less than 2 sequences' for pos in range(self.length()): if (self.seq_of(self.order[0])[pos]!=self.seq_of(self.order[1])[pos] ) and (count_gaps or (not count_gaps and self.seq_of(self.order[0])[pos]!='-' and self.seq_of(self.order[1])[pos]!='-')): out.append(pos) return out def local_matches(self, min_match=3 ): return self.local_mismatches(min_match, 1, 1) def matches(self, min_match=3): """same logic as the previous functions. This is RELIABLE. Output is basically a condensation of the identity matrix, additionally with the filtering out of short matches output: list of [start_match, end_match] , with indexes which are included in the match and 1-based. """ out=[] id_matrix=self.identity_matrix() start_match=None now_in_match=False for i in range(len(id_matrix)): if id_matrix[i]==1: if not now_in_match: start_match=i now_in_match=True else: if now_in_match: now_in_match=False if (i-start_match)>=min_match: out.append((start_match, i-1)) if now_in_match: now_in_match=False if (i-start_match)>=min_match: out.append((start_match, i-1)) return out def sequence_identity_of(self, title_1, title_2, dont_count_gaps=False, dont_count_terminal_gaps=False): """ Returns the sequence identity of title_1 and title_2. The columns which are gaps in both sequences are removed before computing. 
if dont_count_terminal_gaps , the unaligned tails are not considered; if dont_count_gaps, all columns in which one of the sequences have a gap are not considered. """ seq_1= self.seq_of(title_1) seq_2= self.seq_of(title_2) identical_pos=0; gaps_pos=0 #those in title1 plus those in title2 not aligned to those in title1 #removing positions with gaps in both seqs pos_to_remove=[] for pos in range( self.length() ): if seq_1[pos]=='-' and seq_2[pos]=='-': pos_to_remove.append(pos) for i in range( len(pos_to_remove)-1, -1, -1): seq_1= seq_1[:pos_to_remove[i]]+seq_1[pos_to_remove[i]+1:] seq_2= seq_2[:pos_to_remove[i]]+seq_2[pos_to_remove[i]+1:] # extreme cases: sequences are just gaps are empty strings if any( [all([c=='-' for c in seq_1]), all([c=='-' for c in seq_2])] ) or not seq_1 or not seq_2: return 0.0 #computing nterminal and cterminal gap tails if dont_count_terminal_gaps: c_terminal_gaps_seq1=0 n_terminal_gaps_seq1=0 n_terminal_gaps_seq2=0 c_terminal_gaps_seq2=0 while seq_1[n_terminal_gaps_seq1]=='-': n_terminal_gaps_seq1+=1 while seq_1[-1-c_terminal_gaps_seq1]=='-': c_terminal_gaps_seq1+=1 while seq_2[n_terminal_gaps_seq2]=='-': n_terminal_gaps_seq2+=1 while seq_2[-1-c_terminal_gaps_seq2]=='-': c_terminal_gaps_seq2+=1 n_terminal_gaps=max(n_terminal_gaps_seq1, n_terminal_gaps_seq2) #only one of the two can be different than 0, for construction c_terminal_gaps=max(c_terminal_gaps_seq1, c_terminal_gaps_seq2) #only one of the two can be different than 0, for construction for pos in range( len(seq_1) ): if seq_1[pos]=='-': gaps_pos+=1 elif seq_2[pos]=='-': gaps_pos+=1 elif seq_1[pos]==seq_2[pos]: identical_pos+=1 if not identical_pos: return 0.0 #to avoid dividing by zero when they share no positions if dont_count_gaps: return float(identical_pos)/( self.length() -len(pos_to_remove) - gaps_pos) elif dont_count_terminal_gaps: return float(identical_pos)/( self.length() -len(pos_to_remove) - n_terminal_gaps - c_terminal_gaps) else : return float(identical_pos)/( 
self.length() -len(pos_to_remove) ) def weighted_seq_identity_of(self, title, with_coverage=True, exclude={}): """ Scores a sequence against the rest of the alignment, returning a float from 0.0 to 1.0 -- but the limit values depend on the alignment, it almost never can reach 1. It proceeds in the following way: it compares the input title seq with any other sequence. For each sequence, a weighted score is assigned in this way: for each position in which none of the two sequences have a gap, a weight is considered, as the percent of conservation of the aminoacid of the other (non-input title) sequence. The weighted score for this pair of sequences is the sum of identity flags times the weigth for each position (identity flag: 1 if the two aminoacid are identical, 0 otherwise), divided by the total sum of weights for this sequence. If the input title seq has no alignemnt overlap with any other sequence in the alignemnt, it is not counted for the final score. The average and std deviation of this measure is an indication of the conservation of a profile """ if self.nseq()==1: return 1.0 exclude_hash={title:True} for k in exclude: exclude_hash[k]=True conservation_m = self.conservation_map( exclude=exclude_hash ) coverage_per_position=[] #meaning: the proportion of seqs with something not a gap / tot n seqs for element in conservation_m: coverage= sum([ element[k] for k in element if k !='-' ]) coverage_per_position.append(coverage) #start and stop: zero based scores=[] for t in self.titles(): if not t in exclude_hash: total_weight=0; score_this_seq=0 pos_in_ali=0 for pos in range(self.length()): #now pos_in_ali reflects position we're in, 1 based, in the original alignment with no seq added. 
candidate_seq_here=self.seq_of(title)[pos] profile_seq_here= self.seq_of(t)[pos] if profile_seq_here =='-': continue if not with_coverage and candidate_seq_here=='-': continue weight= conservation_m [pos][profile_seq_here] if with_coverage: weight*= coverage_per_position[pos] is_identity= candidate_seq_here == profile_seq_here score_this_seq+= int( is_identity )*weight total_weight+=weight if total_weight: scores.append( score_this_seq/float(total_weight) ) #print title.split()[0]+' -- '+t.split()[0]+' w: ', total_weight, 's:', score_this_seq, 'ws:', score_this_seq/float(total_weight) #raw_input('next?') if not len(scores): raise Exception, "alignment-> weighted_seq_identity_of ERROR the sequence "+title.split()[0]+' has not any aligned position with any of the other sequences!' return sum(scores)/float(len(scores)) def sequence_identity_hash(self, dont_count_gaps=False, dont_count_terminal_gaps=False ): """Returns a simmetrical hash with the sequence identities of every sequence in the alignment against each other. 
the simmetrical hash can be queried using .get(title1, title2) Nterminal gaps are ignored if dont_count_terminal_gaps==True; All gaps are ignored if dont_count_gaps==True """ h=simmetrical_hash() for index_1 in range(len(self.titles() )): title_1= self.titles()[index_1] h[title_1]={} for index_2 in range(index_1+1, len(self.titles())): title_2= self.titles()[index_2] h[title_1][title_2]= self.sequence_identity_of(title_1, title_2, dont_count_gaps=dont_count_gaps, dont_count_terminal_gaps=dont_count_terminal_gaps) return h def average_sequence_identity(self, dont_count_gaps=False, dont_count_terminal_gaps=False): """ """ if self.nseq()==1: return 1.0 values=[] h=self.sequence_identity_hash(dont_count_gaps=dont_count_gaps, dont_count_terminal_gaps=dont_count_terminal_gaps) for k1 in h: for k2 in h[k1]: values.append(h.get(k1, k2)) return sum(values)/float(len(values)) def average_sequence_identity_of(self, title='', dont_count_gaps=False, dont_count_terminal_gaps=False ): """Returns the average sequence identity of the sequence with "title" with all the other sequences in the profile. positions in which "title" has a gap are considered always as mismatch, unless dont_count_gaps is set to True. in this case those positions are excluded from computation (also when dividing for total length). If dont_count_terminal_gaps is set to True, the unaligned tails are excluded from computation. """ if len( self.titles() )>1: seq=self.seq_of(title) seq_identity_per_title={} for tit in self.titles(): if tit !=title: seq_identity_per_title[tit]=self.sequence_identity_of(title, tit, dont_count_gaps=dont_count_gaps, dont_count_terminal_gaps=dont_count_terminal_gaps ) return sum( seq_identity_per_title.values() ) / len(seq_identity_per_title.keys()) else: return 1.0 def conservation_score(self, title='', matrix={}): """This function computes a conservation score of one sequence (title) with the rest of the alignment. 
At each position, the average blosum score between the aa at this position and the aas of each other sequence which doesn't bring a gap here is multiplied IMP times, where IMP is the relative importance of this position of the alignment and is defined by the proportion of profile sequences (all apart from title) which don't carry a gap here. When all other sequences carry a gap, 0 is added. When the "title" sequence carry a gap, -4 (gap_score) is added, multiplied IMP times. Lastly, the score is divided by the lenght of the alignment. The variable matrix provided should be a hash-like scoring matrix, a hash of hashes with as final values integers or floats for the evaluation of pairwise amino acid alignments. another possibilites is providing the string "identity", which simulates the identity matrix if no matrix is provided, blosum62 is used """ if not title: title=self.titles()[0] gap_score, insertion_score =-4, 0 if not matrix: matrix=load_blosum() seq=self.seq_of(title) index=self.remove(title, True) score=0 for pos in range(len(seq)): aa1=upper(seq[pos]) score_this_pos, gapped=0, 0 for tit in self.titles(): aa2=upper(self.seq_of(tit)[pos]) if aa2=='-': gapped+=1 elif aa1!='-': if matrix == 'identity': score_this_pos+=bool( aa1 == aa2 ) else: score_this_pos+= blosum(aa1, aa2, matrix) relative_importance=( 1- (float(gapped)/len(self.titles())) ) #the relative importance of this position is defined by the fraction of sequences of the profile that are not gapped if len(self.titles() ) == gapped: average_score_this_pos=insertion_score else: average_score_this_pos=float(score_this_pos)/( len(self.titles() ) - gapped ) if aa1!='-': final_position_score = average_score_this_pos*relative_importance else: final_position_score = gap_score*relative_importance #print pos, gapped, relative_importance, final_position_score score+=final_position_score score=float(score)/len(seq) #normalizing per length self.add(title, seq, index) return score def 
average_conservation_score(self, matrix={}): """ This functions returns the average and the std deviation of the conservation scores of the sequences in the alignment. These values represent in a way how well defined (or how loose) is a profile alignment """ scores=[] if not matrix: matrix=load_blosum() for title in self.titles(): scores.append( self.conservation_score(title, matrix) ) average=0 for s in scores: average+=s average/=len(self.titles()) std_deviation=0 for s in scores: std_deviation += pow( (s-average), 2 ) std_deviation/=len(self.titles()) return average, std_deviation def sort(self, o_function=None, inplace=True): """ This function can be used to change the order of the sequences in the alignment. similarly to list.sort, it accepts a sorting function which must accept two [title, seq] arguments. If the function is not specified, the alignment is sorted alphabetically by title. If inplace is False, the self alignment is left untouched and another alignment istance is returned instead. """ if not o_function: def o_function(x, y): x_title, x_seq, y_title, y_seq=x[0], x[1], y[0], y[1] return cmp(x_title, y_title) if inplace: return self.order.sort(o_function, key=lambda x: (x, self.seq_of(x)) ) else: a=alignment(self.diz) a.sort(o_function, True) return a def copy(self): return deepcopy(self) def columns(self, position, length=1, inplace=False, remove_empty_seqs=False): """ columns(self, position, length=1, inplace=False, remove_empty_seqs=False) This function extracts a subalignment of a certain portion, starting from position (first is 0) and long length. 
""" if self.length()<position+length: raise Exception, "alignment->columns ERROR: position+length called too high, > than length of alignment: "+str(position)+'+'+str(length)+'>'+str(self.length()) if not inplace: a=self.copy() else: a=self; a.reset_derived_data() for title in a.titles(): a.diz[title]=a.seq_of(title)[position:position+length] if remove_empty_seqs: a.remove_empty_seqs() if not inplace: return a def remove_empty_seqs(self): """ Delete those sequences which contain only gap in the alignment, and return the list of their title.""" out=[] all_titles=list(self.titles()) for t in all_titles: # if 'TR_hit3' in t: # print '*******************', [replace_chars(self.seq_of(t), '-', '')] if not replace_chars(self.seq_of(t), '-', ''): out.append(t) self.remove(t, True) if out: self.reset_derived_data() return out def replace_columns(self, position, length, subalignment, inplace=True): """ This function replace a portion of the alignment, starting from position (first is 0) and long length, with another alignment whose titles must be identical """ if self.length()<position+length: raise Exception, "alignment->replace_columns ERROR: position+length called too high, > than length of alignment: "+str(position)+'+'+str(length)+'>'+str(self.length()) if not inplace: a=self.copy() else: a=self; self.reset_derived_data() if sorted(self.diz.keys())!=sorted(subalignment.diz.keys()): #checking titles. #write_to_file( join( sorted(self.diz.keys()), '\n'), 'a') #write_to_file( join(sorted(subalignment.diz.keys()), '\n'), 'b') raise Exception, "alignment->replace_columns ERROR: the two alignments must have the same titles! 
" for title in self.titles(): a.diz[title]=a.diz[title][:position]+subalignment.diz[title]+a.diz[title][position+length:] if not inplace: return a def concatenate_with(self, other_ali, inplace=False): """ This function sum the self with another alignment with identical titles: the sequences are concatenated """ if self.titles()!=other_ali.titles(): #checking titles. raise Exception, "alignment->concatenate_with ERROR: the two alignments must have the same titles! " if not inplace: a=self.copy() else: a=self; self.reset_derived_data() for title in self.titles(): a.diz[title]=a.diz[title]+other_ali.diz[title] if not inplace: return a def delete_columns(self, position, length, inplace=True): """ This function delete subalignment of a certain portion, starting from position (first is 0) and long length. """ if self.length()<position+length: raise Exception, "alignment->delete_columns ERROR: position+length called too high, > than length of alignment: "+str(position)+'+'+str(length)+'>'+str(self.length()) if not inplace: a=self.copy() else: a=self; self.reset_derived_data() for title in self.titles(): a.diz[title]=a.diz[title][:position]+a.diz[title][position+length:] if not inplace: return a def realign(self, program='mafft', inplace=True, verbose=False, char_replace='*U', protein=True, mafft_options=" --auto "): """ This function uses mafft to realign the sequences in the alignment. The alignment is returned, unless inplace==True, in this case the self object is modified. 
""" if self.titles(): test_writeable_folder(temp_folder, 'defined temp folder') #checking presence of stars, which will be lost when aligning with mafft forbidden_dict={} for c in char_replace: forbidden_dict[c]=1 stars_chars={} #title -> list of positions of any forbibbed char (char_replace), 0 based for title in self.titles(): seq_no_gap=nogap(self.seq_of(title)) for index, char in enumerate( seq_no_gap ): if char in forbidden_dict: if not stars_chars.has_key(title): stars_chars[title]=[] stars_chars[title].append( [index, char] ) if stars_chars.has_key(title): self.set_sequence(title, replace_chars(self.seq_of(title), forbidden_dict, 'X') ) if program=='mafft': self.display(temp_folder+'realigning_with_mafft') mafft_options+=' --amino ' *int(bool(protein)) bbash('mafft '+mafft_options+' '+temp_folder+'realigning_with_mafft > '+temp_folder+'realigning_with_mafft.aligned', verbose) # if int(bbash('ls -s '+temp_folder+'realigning_with_mafft.aligned').split()[0])<9: #file empty. Maybe the input sequence is aminoacid and the program mafft didn't understand it (often happens for small alignments) # bbash('mafft --auto --amino '+temp_folder+'realigning_with_mafft > '+temp_folder+'realigning_with_mafft.aligned', verbose) b=alignment() seq_index=0 for title, seq in parse_fasta(temp_folder+'realigning_with_mafft.aligned'): old_full_title= self.titles()[seq_index] #assert self.fill_title(title) == old_full_title b.add( old_full_title, seq) seq_index+=1 if not b.titles(): raise Exception, "ERROR realign with mafft failed! output alignment is empty." for title in stars_chars: seq=b.seq_of(title) for pos_seq, char in stars_chars[title]: pos_ali=b.position_in_ali(title, pos_seq+1) seq= seq[:pos_ali-1]+ char +seq[pos_ali:] b.set_sequence(title, seq) if not b.titles(): raise AliError, "ERROR mafft failed in realigning!" else: raise Exception, "ERROR only mafft is currently supported" #elif ... 
#add more programs if needed if not inplace: return b else: self.diz=b.diz; self.order=b.order; self.reset_derived_data() def realign_columns(self, position=0, length=0, inplace=True, input_list=[], protein=True ): """ This function realigns only a portion of the alignment, keeping the rest identical. The alignment is returned, unless inplace==True, in this case the self object is modified. The list_input keyarg can be used to provide more than one column portion at the time (provide a list of [pos, length] elements ). in this case input position and length are not taken into account """ if not input_list and length: input_list=[[position, length]] if not inplace: a=self.copy() else: a=self; self.reset_derived_data() shrinked_length=0 for pos, l in input_list: if self.length()<pos-shrinked_length+l: raise Exception, "alignment->realign_columns ERROR: position+length called too high, > than length of alignment: "+str(pos)+'+'+str(l)+'>'+str(self.length()) cols=a.columns(pos-shrinked_length, l) removed_titles= cols.remove_empty_seqs() if (cols.nseq()>1): try: #printerr( str(pos)+'-'+str(l)+', ready to realign!', 1) cols.realign(protein=protein) #realigning on a try statement cause some particular columns may make crash the alignment program. In this case, we keep the cols as they were before realigning except: pass for title in removed_titles: cols.add(title, '-'*cols.length() ) a.replace_columns(pos-shrinked_length, l, cols) shrinked_length+=l-cols.length() if not inplace: return a def find_desert_columns(self, max_non_gaps=1, length_clusters=2, only_titles=[], join_neighbours=0): """ This functions parse the alignment looking for columns in which a single sequence has something different from a gap. Contiguos desert columns are grouped, and those whose length is > than length_clusters are returned. They are returned in the format: [[pos1, length1], [pos2, lenght2], ... ] positions are 0-based! 
max_non_gaps determines how many seqs can have a non gap in the columns returned. If it is a integer, it is taken as the maximum n of seqs, while if it is a float it is intended as a proportion over the total number of seqs (rounded by excess! careful this is to put the min value to 1 instead than to 0). If only_titles is specified, it is necessary that the columns to realign have non-gaps only for these titles. """ if type(max_non_gaps)==float: max_non_gaps=int(max_non_gaps*self.nseq())+1 out=[] if len(only_titles)!=1: n_last_columns_matching=0 for pos in range(self.length()): non_gaps=0 non_gaps_titles=[] for title in self.titles(): if self.seq_of(title)[pos]!='-': non_gaps+=1 non_gaps_titles.append(title) if non_gaps<=max_non_gaps and not only_titles or all([i in only_titles for i in non_gaps_titles]): n_last_columns_matching+=1 else: if n_last_columns_matching >= length_clusters: out.append([pos-n_last_columns_matching, n_last_columns_matching]) n_last_columns_matching=0 if n_last_columns_matching >= length_clusters: out.append([pos-n_last_columns_matching+1, n_last_columns_matching]) ## joining neighbours if join_neighbours: removed_indexes=[] for region_index in range(len(out[:-1])): start_region, length_region=out[region_index] end_region=start_region+length_region-1 next_start, next_length=out[region_index+1] next_end= next_start+next_length-1 if next_start-end_region -1 <= join_neighbours: #updating the next one to include the current one. 
marking the next one to be removed afterwards removed_indexes.append(region_index) out[region_index+1]= [ start_region, next_end-start_region+1 ] for i in removed_indexes[::-1]: out.pop(i) #printerr(out, 1) #DEBUG debug return out def shrink(self, only_titles=[]): """ This functions detects the desert columns clusters in the alignment and realigns them""" desert_columns=self.find_desert_columns(1, 2, only_titles=only_titles) #, join_neighbours=2) self.realign_columns(input_list=desert_columns) self.reset_derived_data() def conserved_residues(self, mode='count'): """ This function compute the number of columns in the alignment that have high conservation, that is to say, all aminoacids are identical or similar to each other according to similar_aas function. if mode=='count', the number of the columns is reported. if mode=='proportion', the proportion of the columns (respect to the length of the alignment) is reported. if mode=='positions', the list of positions is returned ( NB 0 based! ) """ positions=[]; n_conserved=0 for pos in range( self.length() ): if self.is_column_conserved(pos): if mode=='positions': positions.append(pos) n_conserved+=1 if mode=='count': return n_conserved elif mode=='positions': return positions elif mode=='proportion': return float(n_conserved)/self.length() def is_column_conserved(self, pos, mode='similar'): """ Utility used by previous function, conserved residues. This function checks if a column of the alignment is conserved, this meaning either that all characters must be identical (mode=='identical') or that they all must be all similar aminoacids, computed with function similar_aas (mode=='similar', default). 
NB position is 0 based.""" chars_in_this_pos={} for title in self.titles(): if mode=='identical': for c in chars_in_this_pos: if self.seq_of(title)[pos]!=c: return False elif mode=='similar': for c in chars_in_this_pos: aa_this_pos=self.seq_of(title)[pos] if not c==aa_this_pos and not similar_aas(aa_this_pos, c): return False chars_in_this_pos[self.seq_of(title)[pos]]=1 return True def sort_by_completeness(self, inplace=True): """ This function sort the titles by completeness of the sequence. The first ones will be the more complete ones, more suitable to be queries. if inplace == False, the ordered list of titles is returned instead of set to the .order attribute of this alignment. """ cons_map=self.conservation_map() temp_diz_to_sort={} for title in self.titles(): score=0 for pos in range(len(cons_map)): importance_of_this_pos=0 max_cons_lett= ['', 0] for k in cons_map[pos]: if cons_map[pos][k] > max_cons_lett[1] and k!='-': max_cons_lett= [k, cons_map[pos][k] ] if self.seq_of(title)[pos]!='-': score+=max_cons_lett[1] else: score-=max_cons_lett[1] temp_diz_to_sort[title]= float(score) / self.length() sorted_titles=temp_diz_to_sort.keys() sorted_titles.sort(key=temp_diz_to_sort.get, reverse=True) if inplace: self.order= sorted_titles else: return sorted_titles def set_sequence(self, title, sequence): """ Like add, but it is meant to change existing sequences. If the alignment does not have title, an expection is raised""" if not self.diz.has_key(title): raise Exception, "alignment->set_sequence ERROR the alignment does not have title: "+str(title) self.diz[title]=sequence self.reset_derived_data() def transfer_alignment(self, other_ali, neutral_chars='UX*', dont_shrink=False): """ This function computes a common alignment between two existing alignments, exploiting common sequences. 
If the two alignments have no common titles, an exception is raised""" def find_prot_no_gap(names_dict, position, ali): for name in names_dict: if ali.seq_of(name)[pos]!='-': return name return False this_ali=self.copy(); other_ali=other_ali.copy() diz_common={} #contains the names of the common prot between this and other alignment for title_self in this_ali.titles(): if title_self in other_ali.titles(): diz_common[title_self]=1 if len(diz_common)==0: raise Exception, 'alignment->transfer_aligment ERROR: no common proteins between the two alignments' position_map=[-1 for i in range(this_ali.length())] #maps positions of this_ali to the positions of other_ali prot=diz_common.keys()[0] pos=0 #this keeps track of the position in this_ali ; pos_global keeps track of the position in other ali while pos <this_ali.length(): if this_ali.seq_of(prot)[pos]=='-': prot=find_prot_no_gap(diz_common, pos, this_ali) if not prot: #none of the common proteins has something different from a gap in position: pos. 
pos_global=position_map[pos-1]+1 #becomes 0 if pos is 0, for construction position_map[pos] = pos_global for p_name in other_ali.titles(): new_sequence= other_ali.seq_of(p_name)[:pos_global]+'-'+other_ali.seq_of(p_name)[pos_global:] #if 'sps_Plasmodium_knowlesi_2007-2008/1-811' in p_name: print ">before: \n"+other_ali.seq_of(p_name)+'\n>after: \n'+new_sequence other_ali.set_sequence(p_name, new_sequence) prot=diz_common.keys()[0] else: aa=this_ali.seq_of(prot)[pos] pos_global=position_map[pos-1]+1 #becomes 0 if pos is 0, for construction while position_map[pos]==-1: aa_global=other_ali.seq_of(prot)[pos_global] if aa_global!='-': if lower(aa_global)==lower(aa) or (aa_global in neutral_chars) or (aa in neutral_chars): position_map[pos]=pos_global else: printerr( other_ali.seq_of(prot)[pos_global-4:pos_global+4] +' != '+ this_ali.seq_of(prot)[pos-4:pos+4] , 1) raise Exception, 'alignment->transfer_alignment ERROR AminoAcids dont correspond in pos '+str(pos)+' | '+str(pos_global)+' ('+aa_global+' != '+aa+') for title: '+ prot else: pos_global+=1 pos+=1 #over. let's add initial and final gaps, and add the sequences to other_ali for p_name in this_ali.titles(): # ->up seq='' for pos in range(len(position_map)): if pos==0: for i in range(position_map[0]): seq+='-' #adding initial gaps else: for i in range(position_map[pos]-position_map[pos-1]-1): seq+='-' seq+=this_ali.seq_of(p_name)[pos] for i in range(other_ali.length()-1 -position_map[-1]): seq+='-' #adding final gaps if not diz_common.has_key(p_name): other_ali.add(p_name, seq ) #correcting desert columns defect: if not dont_shrink: other_ali.shrink() return other_ali def remove_redundancy_with_t_coffee(self, max_pair_identity=0.5, inplace=True): """ This function removes sequences keeping the most representative ones. It uses the seq_reformat +trim subroutine of t_coffee, therefore it should not be used on datasets too large. 
To avoid problems with t_coffee titles, the sequences are written in a temporary fasta file using their id (index in the alignment), and read again later. """ temp_ali_h=open(temp_folder+'temp_ali_for_removing_redundancy.fa', 'w') for index, title in enumerate( self.titles() ): print >> temp_ali_h, ">"+str(index)+'\n'+fasta(self.seq_of(title)) temp_ali_h.close() bbash('t_coffee -other_pg seq_reformat -in '+temp_folder+'temp_ali_for_removing_redundancy.fa'+' -action +trim _aln_%%'+str(int(max_pair_identity*100))+'_ -output fasta_aln > '+temp_folder+'temp_ali_for_removing_redundancy.trimmed.fa') new_ali=alignment() for title, seq in getfastalite(open(temp_folder+'temp_ali_for_removing_redundancy.trimmed.fa', 'r')): original_index=int(title) new_ali.add(self.titles()[original_index], seq) if inplace: self.reset_derived_data(); self.__dict__=deepcopy(new_ali.__dict__); else: return new_ali def positions_of_similar_aas(self): """ This function returns the positions (1-based) of the columns which contains aminoacids all identical OR similar to each other according to the similar_aas function. Thought for pairwise alignment, it works also with larger ones. Positions where gaps are present are never returned since they are not considered similar to anything """ if len(self.titles())>=1: outlist=[] for pos in range(self.length()): #pos is 0 based main_aa=self.seq_of(self.titles()[0])[pos] similar_aa_in_this_pos=True for title in self.titles()[1:]: aa=self.seq_of(title)[pos] similar_aa_in_this_pos = similar_aa_in_this_pos and ( similar_aas(main_aa, aa) or main_aa==aa ) if similar_aa_in_this_pos: outlist.append(pos+1) return outlist def convert_sequences(self, sfunction, *args, **keyargs): """ This apply a certain function on every sequence of the alignment. 
Can be useful to convert the alignment in uppercase or lowercase: for example, provide the function upper from the module string as sfunction""" for title in self.titles(): try: new_seq= sfunction( self.seq_of(title) , *args, **keyargs) except: printerr("alignment->convert_sequences ERROR can't use provided function"); raise self.set_sequence(title, new_seq) def remove_duplicated_transcripts(self, identity_threshold, naming_function=None, inplace=False, verbose=False): """ This function is thought to remove redundant predictions coming from a p2g prediction on a set of RNAs like a EST database. The function forms clusters of the alignment using the function self.clustering and the specified identity threshold (mode=0 is used --> dont_count_terminal_gaps, and no reclustering) If then derives a single sequence from each cluster, computing a consensus for each column to exploit all information. The id of the longest sequence in the cluster is used. optionally, you can provide a naming function which must have the form: function(cluster_alignment) -> new_title """ out_ali= self.__class__() #creating a new instance of the same class of self clusters= self.clustering(threshold=identity_threshold, dont_remove_empty_columns=True) for cluster_ali in clusters: if len(cluster_ali.titles())>1: if naming_function is None: cluster_title= sorted( cluster_ali.titles(), key=lambda x: len( nogap(cluster_ali.seq_of(x) )), reverse=True )[0] else: cluster_title= naming_function(cluster_ali) cluster_seq='' cmap=cluster_ali.conservation_map(dont_save=True) for pos in range(cluster_ali.length()): sorted_aa_this_pos = sorted( cmap[pos].keys(), key= lambda x: cmap[pos].get, reverse=True) #sorted by representation if len(sorted_aa_this_pos)>1 and sorted_aa_this_pos[0]=='-': consensus_this_pos=sorted_aa_this_pos[1] else: consensus_this_pos=sorted_aa_this_pos[0] cluster_seq+=consensus_this_pos if verbose: printerr( join([ t.split()[0] for t in cluster_ali.titles()], ', ')+' --> 
'+cluster_title, 1 ) else: cluster_title=cluster_ali.titles()[0] cluster_seq= cluster_ali.seq_of( 0 ) out_ali.add(cluster_title, cluster_seq) #sort ali? if inplace: self.__dict__ = out_ali.__dict__ else: return out_ali def clustering(self, threshold, mode=0, reclustering=False, reclustering_factor=2, dont_remove_empty_columns=False, outclass=None, min_overlap=10): """ This function analyze the self alignment and split it in clusters of similar sequences. Threshold (sequence identity) is used when doing pairwise comparisons to decide whether or not to cluster the two sequence together. mode defines how the sequence identity is computed: 0 [DEFAULT] -> dont_count_terminal_gaps ; 1 -> dont_count_gaps ; 2 -> count gaps you can also directly provide a sequence_identity_hash-like data structure as mode, in this case you can provide any measure of identity- for details on howo the sequence identity is computed, see function alignment.sequence_identity_hash If reclustering==True, then an additional procedure is performed after clustering for the clusters formed by a single sequence: a second step of clustering is performed, with a more permissive threshold (threshold/reclustering_factor) by default, empty columns are removed from every output alignment, to turn this off use dont_remove_empty_columns=True the list of output alignments is returned. 
Their class is alignment by default, but you can choose it with the outclass argument (anyway, no attributes are copied) """ output=[] #will contain: list of output alignments clusters = {}; clusters_next_id = 0 if outclass is None: outclass=alignment if self.nseq()<2: o=outclass() for title in self.titles(): o.add( title, self.seq_of(title) ) return [o] # building identity_hash {title:{other_titles_in_alignment:identity}} if mode == 1: h = self.sequence_identity_hash(dont_count_gaps=True) elif mode == 2: h = self.sequence_identity_hash() elif mode==0: h = self.sequence_identity_hash(dont_count_terminal_gaps=True) else: h=mode ## main program algorythm for index_1 in range(len(self.titles())): title_1 = self.titles()[index_1] if clusters.has_key(title_1): this_cluster_id = clusters[title_1] else: this_cluster_id = clusters_next_id clusters_next_id += 1 clusters[title_1] = this_cluster_id for index_2 in range(index_1+1, len(self.titles())): title_2 = self.titles()[index_2] if not min_overlap or len([ True for p in range(self.length()) if self.seq_of(title_1)[p]!='-' and self.seq_of(title_2)[p]!='-' ]) >= min_overlap: # checking in how many columns we have some non gap in both sequences. if the number is very low, we can have high seq id even idf the sequences don't look alike if h.get(title_1, title_2) >= threshold: if clusters.has_key(title_2) and not clusters[title_2]==this_cluster_id : # title_2 was already in a cluster. let's update all other titles to be in this cluster clusters_to_update = clusters[title_2] for t in self.titles(): if clusters.has_key(t) and clusters[t] == clusters_to_update: clusters[t] = this_cluster_id else: clusters[title_2] = this_cluster_id c = {}#{c_id:alignment} for t in self.titles(): c_id = clusters[t] if not c.has_key(c_id): c[c_id] = outclass() c[c_id].add(t, self.seq_of(t)) if reclustering: solitaire_seqs={} #sequences left alone (a cluster of a single sequence) #print """The following sequences don\'t get the minimum threshold. 
#For them, a second clustering is performed reducing the threshold in one half. #""" for k in c.keys(): if c[k].nseq() == 1: for title in c[k].titles(): solitaire_seqs[title]=1 max_title=''; max_seq_id=0.0 for title2 in self.titles(): if title2!=title: if h.get(title, title2) > max_seq_id: max_seq_id=h.get(title, title2) max_title=title2 if h.get(title, max_title) >= threshold/float(reclustering_factor) and not solitaire_seqs.has_key(max_title): c[clusters[max_title]].add(title, self.seq_of(title)) #print ' --> '+title.split()[0].ljust(40)+' clustered with '+max_title.split()[0].ljust(50)+'identity '+str(h.get(title, max_title)) del c[clusters[title]] if not dont_remove_empty_columns: for k in c.keys(): c[k].remove_useless_gaps() sorted_c_ids = c.keys() def nseq_for_key(k): return c[k].nseq() sorted_c_ids.sort(key=nseq_for_key, reverse=True) for final_cluster_id in range(len(sorted_c_ids)): # the final_cluster_id is actually not used. old_c_id = sorted_c_ids[final_cluster_id] output.append(c[old_c_id]) return output def remove_redundancy(self, threshold, mode=0, dont_remove_empty_columns=False, outclass=None, min_overlap=10, inplace=True, silent=True, correspondance_hash={}, choose_repr=None): """ Modify in place or returns a copy of the alignment removing redundancy, so that in output all sequences will have sequence identity > threshold (between 0.0 and 1.0). Initially alignment is split into clusters with function clustering, then a single sequence is chosen for each cluster; normally this is done by maximizing the average sequence identity with the sequences removed in favor of this (the sequence closest to each cluster baricentre act as representative). But! you can instead provide a function to choose_repr ; this must be a function that accepts an alignment (self) and a list of titles, and returns a single title. 
If it returns None, the function will use the built-in representative choice procedure for that particular cluster.""" # building identity_hash {title:{other_titles_in_alignment:identity}} if mode == 1: h = self.sequence_identity_hash(dont_count_gaps=True) elif mode == 2: h = self.sequence_identity_hash() elif mode == 0: h = self.sequence_identity_hash(dont_count_terminal_gaps=True) clusters= self.clustering(threshold, mode=h, dont_remove_empty_columns=True, min_overlap=min_overlap) ## computing all clusters; providing identiti hash to avoid computing it twice out_titles=[] for c in clusters: if len(c.titles())>1: best_title=None if not choose_repr is None: best_title = choose_repr (self, c.titles() ) if best_title is None : #either choose_repr is None, or it returned None in this case so we want to use the default function average_seq_identities_within_cluster={} for t in c.titles(): average_this_t= sum( [ h.get(t, t2) for t2 in c.titles() if t!=t2 ]) / float(len(c.titles())-1) average_seq_identities_within_cluster[t]=average_this_t these_titles_sorted= sorted( c.titles(), key=average_seq_identities_within_cluster.get, reverse=True ) #for t in these_titles_sorted: print t.split()[0], average_seq_identities_within_cluster[t] best_title = these_titles_sorted[0] other_titles = [t for t in c.titles() if t!=best_title] else: best_title=c.titles()[0] other_titles=[] out_titles.append([best_title, other_titles]) #o.add(best_title, self.seq_of(best_title) ) for out_t, other_ts in out_titles: for discarded_t in other_ts: correspondance_hash[discarded_t]=out_t if not silent: write('REMOVED: '+ discarded_t.split()[0]+' >KEPT: '+out_t.split()[0], 1) if inplace: out_titles_h={} for t, other_ts in out_titles: out_titles_h[t]=1 titles= self.titles() for t in titles: if not t in out_titles_h: self.remove(t, True) if not dont_remove_empty_columns: self.remove_useless_gaps() else: if outclass is None: outclass=alignment o=outclass() for t, other in out_titles: o.add(t, 
self.seq_of(t) ) if not dont_remove_empty_columns: o.remove_useless_gaps() return o def build_tree(self, folder=None, tree_class=None, trimal_options=' -phylip -gt 0.1 -cons 33.33 ', phylogeny_options= '-c /home/mmariotti/software/selenoprofiles/libraries/salva_pipeline.config.for_selenoprofiles' ): """ Uses Salva pipeline to build a tree. Note: use only for amino acid sequences. A folder dedicated to compute the phylogeny is created (folder argument -- done in temp folder if not provided). it the tree_class argument is not defined (as default), the path to newick tree file computed is returned. Otherwise, one may provide a tree class (such as ete2.Tree), and this will be initialised with the mentioned tree file """ if self.nseq()<3: raise Exception, "ERROR can't build a tree with less than 3 sequences!" if folder is None: folder=temp_folder+'building_tree' folder=Folder(folder) # creating if necessary, adding "/" input_of_trimal_filename = folder+'ali.'+'.raw_ali' output_of_trimal_filename = folder+'ali.'+'.alg.clean' fasta_copy_of_output_of_trimal_filename = folder+'ali.'+'.aligned_fa' ungapped_fasta_copy_of_output_of_trimal_filename = folder+'ali.'+'.fa' ali_temp=alignment() for title in self.titles(): ali_temp.add(title, self.seq_of(title) ) ali_temp.convert_sequences(replace_chars, '*', 'X') ali_temp.remove_useless_gaps() ali_temp.display(input_of_trimal_filename) bbash('trimal -in '+input_of_trimal_filename+' -out '+output_of_trimal_filename+' '+trimal_options) bbash('trimal -in '+output_of_trimal_filename+' -out '+fasta_copy_of_output_of_trimal_filename+' -fasta ') write_to_file( join([">"+t+'\n'+nogap(s) for t, s in parse_fasta(fasta_copy_of_output_of_trimal_filename)], '\n') , ungapped_fasta_copy_of_output_of_trimal_filename) bbash('cd '+folder+' ; ReconstructingTrees.py '+phylogeny_options+' -f '+base_filename(output_of_trimal_filename)+' -d ./ ') ml_tree_files=bbash('find '+folder+' -name "'+'ali.tree.ml.*.nw'+'"').split('\n') if len(ml_tree_files)>1: 
rank_file=folder+'ali.tree.rank.ml' ranks={} for line in open(rank_file, 'r'): ranks[line.split()[0]]=float(line.split()[1]) best_model=sorted(ranks.keys(), key=ranks.get)[-1] #since they are negative, we want the highest one, the closest the zero ml_tree_file=folder+'ali.tree.ml.'+best_model+'.nw' else: ml_tree_file=ml_tree_files[0] tree_link=folder+'ali.best_tree' bbash('ln -s '+base_filename(ml_tree_file)+' '+tree_link) if tree_class is None: return tree_link return tree_class( tree_link ) def complete_word(uncomplete_word, dictionary, word_match=False): """given a word, returns the entry in the dictionary from where this word where cut. If it is not unique returns -1, if none is matching returns False ex: ('cat' {'carpet':1, 'catastrophe':1, 'bunny':1}) -->return 'catastrophe' if word_match==True: when the title would be called as non-unique, an additional check is performed: if a single title has a space or a tab right after the match, this is considered a good match and it is returned. ex: ('dog' , {'doggie':1, 'dogs':1, 'dog one':1}) --> 'dog one' is returned. if no word_match option is set, this would return -1 """ if dictionary.has_key(uncomplete_word): return uncomplete_word title_length=len(uncomplete_word) title_that_match='' for t in dictionary.keys(): # if len(t)>= title_length: # 'nothing' # print t[:title_length] if t[:title_length] == uncomplete_word: if title_that_match: #already on! not unique if word_match: #unless: word_match is on and a single title has a space or a tab right after the match. this match has more value than the other one. if t[title_length] in '\t ' and not title_that_match[title_length] in '\t ': title_that_match=t elif t[title_length] in '\t ' and title_that_match[title_length] in '\t ': return -1 else: return -1 else: title_that_match=t if not title_that_match: return False else: return title_that_match def remove_items_from_list(alist, item_list, inplace=False): """ Utility to remove efficiently multiple items from a list. 
The arguments are the input list and the list of items to remove. If inplace==True, the operation is performed directly on the input list, other wise a new list is returned""" if not inplace: outlist=list(alist) else: outlist=alist items_hash={} for item in item_list: items_hash[item]=1 for index in range(len(outlist)-1, -1, -1): if items_hash.has_key(outlist[index]): outlist.pop(index) return outlist def remove_items_from_list_with_indexes(alist, index_list, inplace=False, index_are_ordered=False): """ Same as the remove_items_from_list function , but the list of indexes to remove is provided instead of the item list. the function orders the list of indexes as first step. If they are already ordered, you can save this time by setting index_are_ordered = True """ if not inplace: outlist=list(alist) else: outlist=alist if index_list: if not index_are_ordered: index_list.sort() if index_list[-1] >= len(outlist): raise Exception, "remove_items_from_list_with_indexes ERROR one or more indexes are > than the length of the list: "+str(index_list)+' > '+str(len(outlist)) for item_index in range(len(outlist)-1, -1, -1): if item_index== index_list[-1]: #item found index_list.pop(-1) outlist.pop(item_index) if index_list: raise Exception, "remove_items_from_list_with_indexes oops" return outlist def getfastalite(cfile,order=1): line=cfile.readline() seq='' ord_list=[] #[ [title1, seq1], [title2, seq2] ... ] diz={} #{ title: seq } while line: if line[0]=='>': if seq: #not first line # print [title] if order==1: ord_list.append( [title, seq] ) else: diz[title]=seq title=del_white(replace(line[1:], '\n', '')) seq='' else: seq+=replace( replace(line, '\n', ''), '#', '') line=cfile.readline() if order==1 and seq: ord_list.append( [title, seq] ) elif seq: diz[title]=seq if order==1: return ord_list else: return diz def getfastaaln(cfile,order=1): #clustalW format line=cfile.readline() seq='' ord_list=[] #[ [title1, seq1], [title2, seq2] ... 
def getfastaaln(cfile, order=1):
    """Parse a clustalW-format alignment from an open file object.

    order==1 -> ordered list [[title1, seq1], [title2, seq2], ...]
    order!=1 -> dict {title: seq}
    Header ('CLUSTAL...') and '#' lines are skipped; conservation lines
    (starting with '*', '.' or ':', or titled 'cons') are ignored.
    NOTE(review): indentation of the original block reconstruction is
    inferred — confirm against the upstream MMlib source.
    """
    line = cfile.readline()
    seq = ''
    ord_list = []  # [[title1, seq1], [title2, seq2], ...]
    diz = {}       # {title: seq}
    # skip header / comment lines
    while not line or line[0] == '#' or line.startswith('CLUSTAL'):
        line = cfile.readline()
    cont_seq = 0
    while line:
        # one aligned block: a run of non-blank, non-conservation lines
        while line != '\n' and line.strip() and not line.strip()[0] in '*.:':
            title = join(line.split()[:-1], ' ')
            if title and line.split()[0] != 'cons':
                if len(ord_list) <= cont_seq:  # first block: create the entry
                    ord_list.append([title, ''])
                ord_list[cont_seq][1] += line.split()[-1]
                cont_seq += 1
            line = cfile.readline()
        line = cfile.readline()
        cont_seq = 0
    if order == 1:
        return ord_list
    # fix: the dict flavour was never populated in the original (it always
    # returned an empty dict); build it from the parsed entries
    for t, s in ord_list:
        diz[t] = s
    return diz

def getfasta(text, order=1):
    """text = file.readlines(); the file should contain fasta sequences like
    ">title \\n sequence".

    order==1 -> ordered list [[title1, seq1], [title2, seq2], ...]
    order==0 -> dict {title: seq}
    Works with fasta files (also alignments), commented fasta files, extended
    fasta format, modeller alignment format. Does NOT work with clustalW
    alignments (see getfastaaln).
    """
    if len(text) == 1:
        text = text[0].split('\r')  # handle old-mac single-line input
    text.append('!...END...!')      # sentinel so the last entry is flushed
    # allowed characters in alignments; '*' is expected only at seq end
    alignchars = AA_LETT+'-*'
    cont = 0    # number of proteins (number of '>title' lines)
    pos = {}    # {protein_number: line index of its '>title'}
    seq = {}      # output (dict flavour)
    ord_list = []  # output (ordered flavour)
    cline = 0   # line counter
    for line in text:  # fill pos
        if line != '':
            if line[0] == '>':
                pos[cont] = cline
                cont = cont + 1
        cline = cline + 1
    for n in pos:
        # stripping 'P1;' handles Modeller-format alignments
        name = replace(replace(text[pos[n]][1:], 'P1;', ''), '\n', '')
        seq[name] = ''
        done = 0
        i = 0
        ord_list.append([name, ''])
        ctrl = 0  # how many sequence lines were seen after this '>'
        while done != 1:
            seqtoadd = replace(text[pos[n] + i], '\n', '')
            if set(alignchars).issuperset(set(seqtoadd)) and seqtoadd != '':
                if seqtoadd[-1] == '*':
                    seqtoadd = seqtoadd[:-1]
                ord_list[n][1] = ord_list[n][1] + seqtoadd
                seq[name] = seq[name] + seqtoadd
                ctrl = ctrl + 1
            else:
                if ctrl != 0:  # a non-sequence line after some sequence: done
                    done = 1
            i = i + 1
    if order == 1:
        return ord_list
    return seq

# base -> complement lookup; unknown characters are kept unchanged
reverse_complement_diz = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N', 'X': 'X', 'a': 't', 't': 'a', 'g': 'c', 'c': 'g', }

def reverse_complement(seq):
    """Return the reverse complement of a nucleotide sequence.

    Characters without a defined complement (gaps, ambiguity codes other
    than N/X) are preserved as-is, in reversed position.
    """
    out = ''
    for ch in reversed(seq):
        out += reverse_complement_diz.get(ch, ch)
    return out
def smith_waterman(seq1, seq2, gap_open=-4, gap_extension=-2, matrix={}, pssm=[]):
    """Compute the Smith-Waterman (local) alignment of the two sequences.

    Scoring uses blosum(a, b[, matrix]) unless pssm is provided: a list, one
    entry per position of seq1, of {aminoacid: score} dicts.
    Returns [aligned_seq1, aligned_seq2, start_1, end_1, start_2, end_2, score]
    (1-based inclusive indexes into the input sequences; the aligned
    sequences are typically shorter than the inputs, as the alignment is
    local). Returns None if no positive-scoring alignment exists.
    """
    m, n = len(seq1), len(seq2)  # lengths of the two sequences
    # DP score table and traceback pointer matrix
    score = [[0 for i in range(n + 1)] for j in range(m + 1)]
    pointer = [[0 for i in range(n + 1)] for j in range(m + 1)]
    max_score = 0
    max_i = max_j = 0  # fix: initialized so empty input returns None instead of NameError
    # fill DP table and mark pointers
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            # score up: extending (pointer==1) vs opening a gap in seq2
            if pointer[i - 1][j] == 1:
                penalty = gap_extension
            else:
                penalty = gap_open  # pointer can only be 0 or 3 here by construction
            score_up = score[i - 1][j] + penalty
            # score left: extending (pointer==2) vs opening a gap in seq1
            if pointer[i][j - 1] == 2:
                penalty = gap_extension
            else:
                penalty = gap_open
            score_down = score[i][j - 1] + penalty
            # score diagonal: substitution score
            if not pssm:
                match_score = blosum(seq1[i - 1], seq2[j - 1], matrix)
            else:
                # fix: the original tested `len(pssm) <= [i-1]`, comparing an
                # int to a list, which in CPython 2 is always True -> the pssm
                # path always raised. The intended bounds check is below.
                if len(pssm) <= i - 1:
                    raise Exception("ERROR the pssm provided is not long enough... position requested: "+str((i - 1)))
                elif seq2[j - 1] not in pssm[i - 1]:
                    raise Exception("ERROR key error in pssm. position: "+str((i - 1))+' does not have a value for: '+seq2[j - 1])
                match_score = pssm[i - 1][seq2[j - 1]]
            score_diagonal = score[i - 1][j - 1] + match_score
            score[i][j] = max(0, score_up, score_down, score_diagonal)
            if score[i][j] == 0:
                pointer[i][j] = 0  # 0 means end of the path
            if score[i][j] == score_up:
                pointer[i][j] = 1  # 1 means trace up
            if score[i][j] == score_down:
                pointer[i][j] = 2  # 2 means trace left
            if score[i][j] == score_diagonal:
                pointer[i][j] = 3  # 3 means trace diagonal
            if score[i][j] >= max_score:
                max_i = i
                max_j = j
                max_score = score[i][j]
    # traceback from the best-scoring cell, following pointers
    align1, align2 = '', ''
    i, j = max_i, max_j
    while pointer[i][j] != 0:
        if pointer[i][j] == 3:
            align1 += seq1[i - 1]
            align2 += seq2[j - 1]
            i -= 1
            j -= 1
        elif pointer[i][j] == 2:
            align1 += '-'
            align2 += seq2[j - 1]
            j -= 1
        elif pointer[i][j] == 1:
            align1 += seq1[i - 1]
            align2 += '-'
            i -= 1
    align1 = align1[::-1]  # traceback built the strings backwards
    align2 = align2[::-1]
    if not align1:
        return None
    return [align1, align2, i + 1, max_i, j + 1, max_j, score[max_i][max_j]]

def correct_sequence(seq1, seq2, special_chars=['U']):
    """Correct seq1 using reference seq2, identical except where seq1 has
    special chars (typically U/selenocysteine).

    Every mismatch aligned to a special char is replaced by the seq2 letter;
    any other mismatch raises an Exception. Gaps are ignored (those in seq1
    are returned in the same positions). Returns the corrected seq1.
    """
    index2 = -1  # position in seq2, advanced past gaps
    for index1 in range(len(seq1)):
        aa1 = seq1[index1]
        if aa1 != '-':
            index2 += 1
            aa2 = seq2[index2]
            while aa2 == '-':
                index2 += 1
                aa2 = seq2[index2]
            if aa1 != aa2:
                if aa1 in special_chars:
                    seq1 = seq1[:index1] + aa2 + seq1[index1 + 1:]
                else:
                    raise Exception("correct_sequence ERROR the sequences are different not only for special characters: "+seq1+' != '+seq2)
    return seq1
The sequences can contain gaps, which are ignored (those in seq1 are returned in the same positions) """ index_no_gap=0; index2=-1 for index1 in range(len(seq1)): aa1=seq1[index1] if aa1!='-': index2+=1 aa2=seq2[index2] while aa2=='-': index2+=1 aa2=seq2[index2] if aa1!=aa2: if aa1 in special_chars: seq1=seq1[:index1]+aa2+seq1[index1+1:] else: raise Exception, "correct_sequence ERROR the sequences are different not only for special characters: "+seq1+' != '+seq2 return seq1 def dereference(filename): """ This function returns the absolute path of the destination of a symbolic link (if a symbolic link is linked to another symbolic link, it goes all the way to the last file) . If the file provided is not a symbolic link, its absolute path is simply returned. If it does not exists, an exception is raised. """ if not is_file(filename): raise Exception, "dereference ERROR file: "+filename+' was not found ' filename=abspath(filename) out_ls=bbash('ls -l "'+filename+'"') while out_ls.split()[-2]=='->': destination= out_ls.split()[-1] if destination[0]!='/': filename=directory_name(filename)+'/'+destination else: filename=destination out_ls=bbash('ls -l '+filename) return filename def fileid_for_temp_folder(filename): """ This returns the absolute and dereferenced path for a filename, with / and spaces replaced with _ """ return replace_chars(dereference(filename), '/ ', '_') def fastaindex(target_genome, silent=0, force=0): """Run fastaindex from the exonerate package on the target_genome and return the path to the index file, or just return the path if the file is found (unless force=1) If the user has no writing permissions in the folder where the genome file is, the index is created in the temporary folder and it is returned. When the index is created, a message is printed using functio write() unless silent=1 """ if '.' in base_filename(target_genome): index_file=join( target_genome.split('.')[:-1],'.' 
)+'.index' else: index_file=target_genome+'.index' if is_file(index_file) and not force: return index_file temp_index_file=temp_folder+fileid_for_temp_folder(target_genome)+'.index' if is_file(temp_index_file) and not force: return temp_index_file # again this command if the file is in the temporary folder cmnd='fastaindex '+target_genome+' '+temp_index_file if not silent: printerr( 'indexing the nucleotide database '+target_genome , 1) bbash(cmnd) try: test_writeable_folder( directory_name(index_file) ) bash('mv '+temp_index_file+' '+index_file) return index_file except: return temp_index_file def formatdb(target_file, is_protein=False, silent=0): """ Utility to format for blast with formatdb""" if not silent: printerr("attempting to format database: "+target_file+" (will crash if you don't have permissions) ", 1 ) test_writeable_folder(temp_folder, 'temp_folder'); test_writeable_folder( directory_name(target_file) ) temp_folder_for_formatting= temp_folder+'formatting_database' bbash('mkdir '+temp_folder_for_formatting+' && cd '+temp_folder_for_formatting+' && ln -s '+abspath(target_file)+' . && formatdb -i '+base_filename(target_file)+' -o T -p '+{False:'F', True:'T'}[bool(is_protein)]+' && mv '+base_filename(target_file)+'.* '+directory_name(abspath(target_file))) def fastafetch(split_folder, chromosome, target_genome, verbose=0, chars_not_allowed=[':']): """fecthing chromosome routine. File are fetched to a file named after the fasta title. 
def fastafetch(split_folder, chromosome, target_genome, verbose=0, chars_not_allowed=[':']):
    """Fetch a chromosome from target_genome into a file named after the fasta
    title, inside a per-genome subfolder of split_folder.

    chars_not_allowed: iterable of characters that must not appear in the
    output filename (replaced with '{ChNN}' placeholders).
    Returns the path of the fetched (or pre-existing) chromosome file.
    Raises Exception if fetching ultimately produces an empty file.
    """
    target_genome = abspath(target_genome)
    species_subfolder = Folder(split_folder+replace_chars(target_genome, '/', '_'))
    final_filename = species_subfolder+chromosome
    for char in chars_not_allowed:
        if char in final_filename:
            final_filename = replace_chars(final_filename, char, '{Ch'+str(ord(char))+'}')
    temp_filename = temp_folder+'fetching_chromosome.fa'
    if is_file(final_filename):
        pass  # file already there
    else:
        cmnd = 'fastafetch '+target_genome+' '+fastaindex(target_genome)+' "'+chromosome+'" > "'+temp_filename+'"'
        service(' ...fetching chromosome: '+chromosome)
        b = bash(cmnd, verbose)
        if b[0] == 256 and "Could not open fasta index" in b[1]:
            # db not indexed? index it and retry the fetch
            fastaindex(target_genome)
            bbash(cmnd, verbose)
        elif b[0] == 256 and "Could not find identifier" in b[1] and "|" in chromosome:
            # retry adding a trailing pipe: blast sometimes strips it
            cmnd = 'fastafetch '+target_genome+' '+fastaindex(target_genome)+' "'+chromosome+'|" > "'+temp_filename+'"'
            b = bash(cmnd, verbose)
        elif b[0] == 256 and '_' in chromosome and is_number(chromosome.split('_')[0], True):
            # some blast outputs use headers like N_fastafilename instead of
            # gi identifiers; fetch by sequence number instead
            nseq_index = chromosome.split('_')[0]
            service(' ...using as index of sequence the number in blast output: '+nseq_index+' from header: '+chromosome)
            cmnd = 'get_sequence_numberN.g -v NSEQ='+nseq_index+' '+target_genome+' > '+temp_filename
            bbash(cmnd, verbose)
        # check that some sequence actually landed in the temp file
        if bbash('head -n2 "'+temp_filename+'"') == '':
            raise Exception('ERROR fetching chromosome; command_line: '+cmnd)
        bbash('mv '+temp_filename+' "'+final_filename+'"')
    return final_filename

######## NB different from the function in profiles_classes
def fastasubseq(subj_file, start, clength, out_file, pipecommand='', warning=False):
    """Utility to subseq fasta sequences (0-based start; negative start is
    clamped to 0; a too-long clength is trimmed to what is available).

    pipecommand: expert use — pass results through a pipe before out_file;
    use pipecommand='>' to append instead of overwrite (final '>>').
    Returns clength actually cut. Raises Exception on fastasubseq failure.
    """
    start = max(start, 0)
    cmnd = 'fastasubseq "'+subj_file+'" '+str(start)+' '+str(clength)+" "+pipecommand+"> "+out_file
    ss = bash(cmnd)
    try:
        if ss[0] != 0 and "Subsequence must end before end" in ss[1]:
            # requested length beyond sequence end: parse the available
            # length from the error message and retry with the trimmed value
            old_clength = clength
            clength = int(ss[1].split('\n')[0].split('(')[1][:-1]) - start
            cmnd = 'fastasubseq "'+subj_file+'" '+str(start)+' '+str(clength)+" "+pipecommand+"> "+out_file
            ss = bash(cmnd)
            if warning:
                printerr('Fastasubseq: '+str(old_clength)+' bp not available, cutting the first '+str(clength), 1)
        if ss[0] != 0:
            raise Exception("COMMAND "+cmnd+" ERROR in fastasubseq: \""+ss[1]+"\"")
        return clength  # how much sequence was actually cut
    except Exception as e:
        raise Exception("COMMAND "+cmnd+" ERROR in fastasubseq: \""+ss[1]+"\"")
for cline in string.split('\n'): if cline[0]=='>': if seq: for i in range( (len(seq)-1) / char_per_line +1): out+= seq[i*60:(i+1)*60] +'\n' out+= cline+'\n' seq='' else: seq+=cline for i in range( (len(seq)-1) / char_per_line +1): out+= seq[i*60:(i+1)*60]+'\n' return out[:-1] def getfastaxff(text, order=1): # """text=file.readlines(); text (type= list) contains fasta sequences like ">title \n sequence \n #secondary str". If order=1 it returns an ordered list such as [[title1, seq1, ss1], [title2, seq2, ss2], ...] ; if order=0 it returns a dictionary containing {key: title, value: [related sequence }.""" text.append('!...END...!') alignchars=uppercase+'-*#0. ' #allowed characters in alignments # print alignchars cont=0 #number of proteins (number of '>title') pos={} #key: protein_number (the cont associated to it); value: cline of the line with >title seq={} ord_list=[] cline=0 #contatore linea for line in text: if line!='': if line[0]=='>': pos[cont]=cline cont=cont+1 cline=cline+1 for n in pos: name=replace(replace(text[pos[n]][1:],'P1;',''),'\n','').split(' ')[0] #for modeller file capting # print 'name:'+name seq[name]=['',''] done=0 i=0 ord_list.append([name,'','']) ctrl=0 #volte che trova la sequenza dopo un >. 
se trova la str secondaria fa *-1 while done!=1: seqtoadd=replace(text[pos[n]+i],'\n','') if set(alignchars).issuperset(set(seqtoadd)) and seqtoadd!='': if seqtoadd[-1]=='*': seqtoadd=seqtoadd[:-1] # print 'ordlist='+str(ord_list) if seqtoadd[0]=='#': seqtoadd=seqtoadd[1:] ctrl=ctrl*-1 # print 'seqtoadd:'+seqtoadd if ctrl<0: ord_list[n][2]=ord_list[n][2]+seqtoadd seq[name][1]=seq[name][1]+seqtoadd else: ord_list[n][1]=ord_list[n][1]+seqtoadd seq[name][0]=seq[name][0]+seqtoadd ctrl=ctrl+1 else: if ctrl<0: done=1 i=i+1 if order==0: return seq return ord_list #functions for gene class def are_overlapping_exons(exon1, exon2, strand='+', check_phase=True): """the input here are exon_o: [start, stop, phase] where start is always > than stop""" if exon1[0] > exon2[1]: # e1.start>e2.end return False if exon2[0] > exon1[1]: return False if not check_phase or not strand: return True if strand=='+' and mod3(exon1[0]-exon1[2]) != mod3(exon2[0]-exon2[2]): ###frame checking!!! (pos_start of exon + frame) mod3 must be the same for both --> equivalent: (pos_start of exon - phase) mod3 return False elif strand=='-' and mod3(-exon1[1]-exon1[2]) != mod3(-exon2[1]-exon2[2]): #just reversing coordinates. mod3 of negative numbers must work fine e.g. 
mod3(-1) = 2; mod3(-8) = 1 return False return True def mod3(number): #to better implement the mod3 with high numbers STILL TO FINISH return number%3 def exon_length(exon): """exon here being just [start, stop]""" return ( exon[1]-exon[0]+1 ) def summary_overlap(ov, g1, g2): """Prints a summary of the overlap information contained in the object returned by g1.overlaps_with(g2)""" if not ov: return "No overlapping" o='' for e_index in range(len(g1.exons)): o+='1E'+str(e_index+1)+' '+str(g1.exons[e_index][0])+'\t'+str(g1.exons[e_index][1])+' P'+str(g1.phase_of(e_index))+' -- ' if ov[e_index]: index_list=ov[e_index] if type(index_list[0])==list: index_list=[a[0] for a in index_list] for e_index2 in index_list: o+='2E'+str(e_index2+1)+' '+str(g2.exons[e_index2][0])+'\t'+str(g2.exons[e_index2][1])+' P'+str(g2.phase_of(e_index2))+' ++ ' o=o[:-4] if len(index_list)==1 and g2.exons[e_index2]==g1.exons[e_index]: o+=' ***' else: o+='None' o+='\n' return o[:-1] sequence_db={} def load_sequence_db(fasta_file, title_function=None): """Load a fasta file in memory for being used with function fast_sequence """ global sequence_db if title_function is None: title_function=lambda x:x.split()[0] for title, seq in parse_fasta(fasta_file): title_id= title_function(title) sequence_db [title_id] = seq def get_sequence_db(title=None): if title is None: return sequence_db else: try: return sequence_db[title] except KeyError: raise Exception, "MMlib - sequence_db ERROR cannot find a sequence with this title in memory {0}".format(title) class gene(object): """This class handles any kind of genomic coordinate, with each object that can consist by more than one range. The names of attributes/methods are thought for protein coding genes but the class can be used for a variety of other purposes and concepts. Typically, the genomic coordinates refer to a target file which is multifasta. Important: genomic coordinates are specified 1-based, and all indexes are 1-based unless otherwise specified. 
+ Fundamental attributes (more can be added for specific purposes): .strand string + or - .chromosome string name of the first word of the fasta header where this gene resides. Can include gi codes. .exons list contains elements of the type [start, stop] for each exon. Start is always < stop, but the order of these elements is always upstream-to-downstream, so if the gene is on the negative strand, .exons[i][0] > .exons[i+1][0] .id string identifier of the gene. Typically useful when you load a lot at once .phases list automatically filled. Make sense only for protein coding genes and is used basically only when you compute the overlap between some of them. It's a list of: the number of bases of the first codon of a exon which are on the previous exon. there's a phase element for each exon. Careful: this is not flushed if you modify a gene object. .target string path to the target multifasta file. Necessary for the .fasta_sequence() method to be called + Most useful methods, divided per category: (for a full list, type in python: import MMlib; import inspect; print inspect.getmembers(gene, predicate=inspect.ismethod) ) # load / create / modify gene objects .__init__() you can create instances in the standard python fashion, calling gene(). Optionally, you can provide keyword arguments that will be interpreted as attributes of the newly created object. Example: gene(strand='+', chromosome='chrX', target='/data/genome.fa', id='gene1'). .add_exon(start,stop) with start< stop, add a new exon to this gene. It will be placed in the right place inside the .exons list, which is determined by the .strand attribute .load_gff(file) to be used when you have a gff file with only a single gene inside. 
Take care of providing the right tag (default is 'cds'; '*' can be used to consider all gff lines) .load_from_header(header) loads a header line, in the format in which is output by the .header() method .add_exons_from_positions_summary(p_sum) loads the .exons attribute from a string like the one returned by the .positions_summary method .remove_exon(indices*) remove one or more exons given their 0-based indices (0 is the first exon, and so on) .copy() returns a full, deep copy of this gene and its attributes. # access information / outputing .summary() returns a string with a summary of the information in the gene .boundaries() returns [start, stop] where these are the two boundaries of the gene .length() returns the total length of all exons in the gene .span() returns the total length of the gene (include introns) .header() returns a header that can be used in a fasta, which includes all the information about this gene. See full __doc__ for details .gff() returns a gff like text. Optional keyarguments can be used to control what it contains -- see full __doc__ for details .positions_summary() returns a string with a summary of the positions of the exons in this gene object. Example: 123-145,265-300,454-565,666-678,988-999 .fasta_sequence() returns a tuple [title, seq] where seq is the nt sequence as extracted from the .target file, taking only the entry corresponding to .chromosome (and the right positions) # derive gene objects from self / or combine with other genes .get_exon(exon_index) returns a copy of this gene, but with a single exon: the one specified by the (0-based!) exon_index argument .introns() returns a copy of self, but with positions corresponding to the introns of self instead. .subseq(start_subseq, length_subseq) returns a copy of this gene but only with a subseq of it. This may contain less exons than the .self (start_subseq is 1-based) .downstream(distance, length) returns a gene object that is downstream of self. 
Distance 0 means just downstream (no overlap). .upstream(d, l) exists as well .extend(left, right) returns a copy of this gene with extended boundaries .check_boundaries(chromosome_length) this function fixes invalid positions that may result from modifying gene object, because pos<1 or pos>chromosome_length .is_downstream_of(other_g) returns True if self is downstream of the other_g (note that .chromosome is not checked). .is_upstream_of(g) exists as well .intersection_with(other_g) returns a gene object with only the intersection with an other_g; the two genes should overlap or a empty gene will be returned .union_with(other_g) returns a gene object with the union with an other_g; the two genes should be on the same chromosome and strand or a empty gene will be returned. .subtracted_of(other_g) returns a copy of self removing all positions in common with other_g .overlaps_with(other_g, full=False, phase=True, summary=False, strand=True) check if this gene overlaps the other_g object. See the full .__doc__ of this method for details .restore_absolute_coordinates(parent_gene) useful when you have coordinates relative to a subseq of a chromosome. This will restore the coordinates to absolute, given the original subseq range .relative_coordinates(...) This is like the reverse of the last function. You have a certain (self) gene on a chromosome. You want to cut the chromosome in a subseq and know the new coordinates of the gene in the obtained subseq. ## other MMlib functions related to the gene class (not methods of gene though): load_all_genes(gff_file) returns a list of gene objects corresponding to the entries in a gff files. Define the wanted tag or any other .load_gff argument. This function determines a uniq id for each gff line which decides which exons go together. This defaults to taking the first word of the last gff field but the function can be defined with get_id merge_genes(gene_list) merge a list of genes to remove redundancy. 
It has lots of options, see its __doc__ for details """ def __init__(self, load_file='', species=None, tag=None, strand=None, chromosome=None, target=None, seed=None, **other_features): """ standard python fashion to create gene intances. Optionally, you can provide keyword arguments that will be interpreted as attributes of the newly created object. Example: gene(strand='+', chromosome='chrX', target='/data/genome.fa', id='gene1'). These can be also non-standard attributes that will be created. The load_file argument can be specified to load the gene from a gff file (using load_gff). The tag argument will be passed to this function. A tag is a gff element class, for example "cds", "exon" and so on... "cds" are loaded by default. "*" can be provided to load all lines. The seed argument can be provided to load another gene object into this instance. It is pretty much like copying, but changing the class to the one of self. """ self.exons=[] #first-coding exon first (if negative strand, position-based reverse order); each exon is [start, stop] where always tart<stop self.species=species self.target= target self.chromosome=chromosome self.strand=strand self.phases=[] self.id=None self.sec_pos=[] if load_file and tag: self.load(load_file, tag) elif load_file: self.load(load_file) for k in other_features: self[k]=other_features[k] if not self.id: self.id= str(uniq_id(self)) if not seed is None: g=seed.copy(newclass=self.__class__) self.__dict__= g.__dict__ return def __getitem__(self, index): if index in self.__dict__: return self.__dict__[index] if type(index)==int: return self.exons[index] else: return None def __setitem__(self, key, value): self.__dict__[key]=value def __str__(self): return self.summary() def __nonzero__(self): return bool(self.exons) def get(self, attribute): """ shortcut to get an attribute.. if not present, returns None (does not raise exception). 
""" return self.__getitem__(attribute) def __iter__(self): return self.exons def __delitem__(self, attribute): """Utility meant to delete non std attributes""" del self.__dict__[attribute] def copy(self, newclass=None): """All object features will be copied. if no new_class is provided, the one of the parent (self) will be used """ if newclass is None: a=self.__class__() else: a=newclass() for i in self.__dict__: if i !='exons': a.__dict__[i]=deepcopy(self.__dict__[i]) for i in self.exons: a.exons.append(list(i)) #hard copying the list of exons return a def summary(self, description='GENE OBJECT', other_fields=[], print_exons=True): """ returns a summary string for this object. A description="" argument can be provided, it will be displayed as a title. The other_fields argument can be provided with a list of strings corresponding to a list of attributes that you want to printed as well. The argument print_exons specifies if the list of exons has to be printed for the object (by default, it is) """ o='' o+='### '+description+' ###'+'\n' o+='# ID:\t'+str(self.id)+'\n' if self.species: o+='# SPECIES:\t'+str(self.species)+'\n' if self.target: o+='# TARGET:\t'+str(self.target)+'\n' o+='# CHROMOSOME:\t'+str(self.chromosome)+'\n' o+='# STRAND:\t'+str(self.strand)+'\n' o+='# BOUNDARIES:\t'+str(self.boundaries())+'\n' for f in other_fields: o+="# "+f.upper()+':\t'+ str(self[f])+'\n' if self.sec_pos: o+='# SEC_POS:\t' for i in self.sec_pos: o+=str(i)+' ' o+='\n' for e_index in range(len(self.exons)): o+='EXON'+str(e_index+1)+'\t'+str(self.exons[e_index][0])+'\t'+str(self.exons[e_index][1])+'\tPHASE:'+str(self.phase_of(e_index))+'\n' return o[:-1] def boundaries(self): """ Returns [start, stop] where these are the two boundaries of the gene (basically taking first and last exons)""" if self.exons: return [ min(self.exons[0][0], self.exons[-1][0]), max(self.exons[0][1], self.exons[-1][1])] #works both with + and - strand else: return None def boundaries_gene(self, 
minimal=False): """ Return a copy of the self gene with a single exon that span its boundaries""" if minimal: g=gene(chromosome=self.chromosome, strand=self.strand) g.exons=list(self.exons) else: g=self.copy() bounds=g.boundaries() while g.exons: g.remove_exon(0) g.add_exon(bounds[0], bounds[1]) return g def length(self): """ Returns the total nucleotide length of this gene""" l=0 for st, end in self.exons: l+=(end-st)+1 return l def __len__(self): return self.length() def span(self): """ Returns the total length in nucleotide that this gene spans. It differs from .length() because .span() counts the intron lengths as well""" if not self.exons: return 0 b=self.boundaries() return b[1]-b[0]+1 def phase_of(self, exon_index): """the number of bases of the first codon of a exon which are on the previous exon""" ######################### how phases changes from exon to exon #phase1#len%3 #phase2# ######################### # 0 # 0 # 0 # # 0 # 1 # 1 # # 0 # 2 # 2 # ######################### # 2 # 0 # 2 # # 2 # 1 # 0 # # 2 # 2 # 1 # ######################### # 1 # 0 # 1 # # 1 # 1 # 2 # # 1 # 2 # 0 # ######################### if len(self.phases)!=len(self.exons): self.phases=[] if not self.phases: for e_index in range(len(self.exons)): if e_index==0: self.phases.append(0) else: self.phases.append( mod3( self.phases[e_index-1]+ mod3(exon_length(self.exons[e_index-1])) ) ) return self.phases[exon_index] def load_gff(self, gff_file, tag='cds', check_sec_pos=True, keep_program=False, parse_keywords=False, keep_tag=False, process_line=None): """can accept as input direclty text, or path to a file, or a file handler; tag=* means any tag If check_sec_pos==True (default), then "Sec_position:" (as present in selenoprofiles gffs) are read from the gff input and this information is stored into the .sec_pos attribute. 
If keep_program==True (not default), a .program attribute is used to keep the program field of the gff being loaded (2nd field) If parse_keywords==True (not default), the 9th field of the gff is searched for expressions like: SOMEKEY:VALUE ; for each one found, a .SOMEKEY attribute is created and assigned to VALUE, taking care of converting VALUE to the appropriate type (string, number, float). A .keywords attribute is created to keep the list of attribute names added this way. You may specify another separator instead of ":" as argument of parse_keywords, as long as just one will be found per text block. For maximum flexibility, you have the process_line keyword. You can provide a function that accepts two arguments: a list of the fields (the result of line.split('\t' on the current line) and the self gene object being filled. In this way you can keep any other attribute in the line you're interested into. """ if parse_keywords==True: parse_keywords=':' if type(gff_file)==str and os.path.isfile(gff_file): gff_lines=open(gff_file, 'r').readlines() self.id=gff_file elif type(gff_file)==file: gff_lines=gff_file.readlines() self.id=gff_file.name else: #string gff_lines=gff_file.split('\n') try: line=gff_lines[0] self.chromosome=line.split('\t')[0] self.strand=line.split('\t')[6] if not self.species: path_splt=os.path.abspath(gff_file).split('/') if 'output' in path_splt and len(gff_file.split('/')[-1].split('.'))>=4: #selenoprofiles gff self.species=path_splt[path_splt.index('output')-1] for line in gff_lines: if line: splt=line.split('\t') if tag=='*' or lower(splt[2]) == lower(tag): self.add_exon( int(splt[3]), int(splt[4]) ) if check_sec_pos and len(splt)>8 and 'Sec_position:' in splt[8]: for i in splt[8].split('Sec_position:')[1:]: self.sec_pos.append(int(i.split()[0])) if parse_keywords and len(splt)>8: commentsplt=splt[8].split() for word_index, word in enumerate(commentsplt): if word.count( parse_keywords )==1: keyword=word.split(parse_keywords)[0] if 
word.endswith(parse_keywords): value=option_value(commentsplt[word_index+1]) else: value=option_value(word.split(parse_keywords)[1]) self[keyword]=value if not hasattr(self, 'keywords'): self['keywords']=[] self['keywords'].append(keyword) if keep_program: self['program']=splt[1] if keep_tag: self['tag']=splt[2] if not process_line is None: process_line(splt, self) except Exception, e: raise Exception, "ERROR loading gff: "+str(gff_file)+' '+str(e) load=load_gff def add_exon(self, start, stop): """ usage: gene_obj.add_exon(start, stop) where start<stop. exons can be added in any order, they will be placed in order. """ if stop<start: raise Exception, "gene->add_exon ERROR stop must be > than start. called with arguments: "+str(start)+' '+str(stop) index_to_append=0 if self.strand=='+': while index_to_append<len(self.exons) and self.exons[index_to_append][0]<start: index_to_append+=1 if self.strand=='-': while index_to_append<len(self.exons) and self.exons[index_to_append][0]>start: index_to_append+=1 self.exons.insert(index_to_append, [start, stop]) def remove_exon(self, *indices): """ remove one or more an exon given the index(es). After that, indices will change. can take as arguments both indices or list of indices. NOTE: indices are 0-based here! 
""" index_list=[] for i in indices: if i or i==0: if type(i)==list: index_list.extend(i) else: index_list.append(i) index_list.sort(reverse=True) for i in index_list: #reversing so the change of index does not affect this function self.exons.pop(i) def get_exon(self, exon_index, minimal=False): """ Return a copy of the self gene, with a single exon given by the exon index (0 is for the first one); id put by default is the id of the self gene plus the flag "_exonN" where N is the exon index """ if minimal: g=gene(chromosome=self.chromosome, strand=self.strand) else: g=self.copy() g.id=self.id+"_exon"+str(exon_index) g.exons=[] g.add_exon( self.exons[exon_index][0], self.exons[exon_index][1] ) return g def overlaps_with(self, other_g, full=False, phase=True, summary=False, strand=True): """Main function. Computes the overlap between two gene objects. By default phase is checked as well, use kwarg phase=False to disable it (when not dealing with cds). Same for strand. It returns False if the chromosome or strand are different, or one of the two objects is empty, or if the gene boundaries are not overlapping, or if they are but no exon is overlapping and same phase of another one in the second gene. If there is overlap, it returns: (if Full=False, by default it is) a list of lists, one for each exon of the self gene. Each of these contains the indexes of the exons of the second gene overlapping even partially with this exon of the self gene. e.g. [[0], [2], [], [5]] --> G1exon1 overlaps with G2exon1, G1exon2 overlaps with G2exon3, G1exon3 is not overlapping with anything, G1exon4 is overlapping with G2exon6 If Full=True, instead of just an index, each of these contains a three element list for each exon of the second gene overlapping even partially with this exon of the self gene: [index, start, stop], where start and stop are the positions of the overlapping intersection. 
if summary=True, instead of normal (or full) output, a tuple (output, summary) is returned, summary being computed by the function summary_overlap() """ if self.chromosome != other_g.chromosome or (strand and self.strand != other_g.strand) or not self.exons or not other_g.exons: #or self.species != other_g.species ###assuming nobody is so stupid to look for overlap in genes in different species o= False #producing false exons (with phase 0) to check boundaries else: if strand: strand=self.strand if are_overlapping_exons(self.boundaries(), other_g.boundaries(), strand, False): #they are sharing some genomic space. I assume that now testing brutally each exon of gene1 against each exon of gene2 would not be too expensive: overlap_list=[] for e1_index in range(len(self.exons)): exon_g1=list(self.exons[e1_index]) exon_g1.append(self.phase_of(e1_index)) overlapping_this_exon=[] for e2_index in range(len(other_g.exons)): exon_g2=list(other_g.exons[e2_index]) exon_g2.append(other_g.phase_of(e2_index)) if are_overlapping_exons( exon_g1, exon_g2, self.strand, phase ): if full: a=[e2_index] a.extend(intersection_of(exon_g1, exon_g2)) overlapping_this_exon.append( a ) else: overlapping_this_exon.append(e2_index) overlap_list.append(overlapping_this_exon) if not any(overlap_list): o= False else: o= overlap_list else: o= False if summary: return o, summary_overlap(o, self, other_g) else: return o def subseq(self, start_subseq, length_subseq=None, minimal=False, newclass=None): """ This function returns a gene object obtained cutting a subseq of the self object. If the a single exon do not contain the whole length, the returned object will have more than one exon. NB start_subseq is 1 based. 
""" if not minimal: out_gene=self.copy(newclass=newclass); out_gene.exons=[] else: out_gene=gene(chromosome=self.chromosome, strand=self.strand, target=self.target) if length_subseq is None: length_subseq = self.length() - start_subseq + 1 first_exon_index, length_of_previous_exons = 0, 0 while first_exon_index<len(self.exons) and self[first_exon_index][1] - self[first_exon_index][0]+1 +length_of_previous_exons <= start_subseq-1 : #exiting this loop, the start of secis is included in this exon length_of_previous_exons+= self[first_exon_index][1]-self[first_exon_index][0]+1 first_exon_index+=1 if first_exon_index>=len(self.exons): raise Exception, "ERROR subseq function called with start too high for this gene object: "+gene.summary(self) #print "first_exon_index:"+str(first_exon_index) if self.strand=='+': position_start_of_subseq = self[first_exon_index][0] + (start_subseq-1-length_of_previous_exons) out_gene.add_exon( position_start_of_subseq , min( position_start_of_subseq + length_subseq -1 , self[first_exon_index][1] ) ) current_exon = first_exon_index remaining_length = length_subseq - (out_gene[0][1]-out_gene[0][0]+1) while remaining_length : #print "remaining_length"+str(remaining_length) current_exon+=1 if current_exon>=len(self.exons): raise Exception, "ERROR subseq function called with length too high for this gene object." 
out_gene.add_exon( self[current_exon][0] , min( self[current_exon][0]+remaining_length-1 , self[current_exon][1] ) ) remaining_length -= (out_gene[-1][1]-out_gene[-1][0]+1) elif self.strand=='-': position_start_of_subseq = self[first_exon_index][1] - (start_subseq-1-length_of_previous_exons) out_gene.add_exon( max( position_start_of_subseq - (length_subseq -1) , self[first_exon_index][0] ) , position_start_of_subseq ) current_exon = first_exon_index remaining_length = length_subseq - (out_gene[0][1]-out_gene[0][0]+1) while remaining_length : #print "remaining_length"+str(remaining_length) current_exon+=1 if current_exon>=len(self.exons): raise Exception, "ERROR subseq function called with length too high for this gene object." out_gene.add_exon( max( self[current_exon][1]-(remaining_length-1) , self[current_exon][0] ) , self[current_exon][1] ) remaining_length -= (out_gene[-1][1]-out_gene[-1][0]+1) return out_gene def reverse_subseq(self, start_subseq, length_subseq): """This function is equivalent to subseq, except for the fact that the index refers to the end to the gene. so, a start_subseq of 1 and length_subseq of 3 would return a gene object with only the last codon """ a=self.copy() if self.strand=='+': a.strand='-' elif self.strand=='-': a.strand='+' a.reorder_exons() b=a.subseq(start_subseq, length_subseq) b.strand=self.strand b.reorder_exons() return b def reorder_exons(self): """ This function put the exons uin the correct order, that is to say, upstream to downstream, which depends on the .strand attribute.""" if self.strand=='+': self.exons.sort(key=lambda x:x[0]) elif self.strand=='-': self.exons.sort(key=lambda x:x[0], reverse=True) def restore_absolute_coordinates(self, parent_gene, inplace=True): """ This function is useful when the coordinates on the self gene are relative to some portion of the chromosome, they are not absolute. By providing a gene object representing the portion to which they were relative, coordinates are set back to absolute. 
NB: the strand of the result is computed as in maths by the product of the strand of the self and of the parent. If inplace==True, the coordinates are modified in the self object, otherwise a new object with absolute coordinates is returned """ g=gene(chromosome=parent_gene.chromosome) if self.strand!=parent_gene.strand: g.strand='-' else: g.strand='+' for start, stop in self.exons: s=parent_gene.subseq(start, (stop-start+1)) for s_start, s_stop in s.exons: g.add_exon(s_start, s_stop) if inplace: self.exons=g.exons self.chromosome=g.chromosome self.strand=g.strand else: return g def relative_coordinates(self, subseq_start, subseq_end, reverse=False, chromosome='subseq_sequence'): """ This function can be seen as the reverse of restore_absolute_coordinates. You have a certain (self) gene on a chromosome. You want to cut the chromosome in a subseq and know the new coordinates of the gene in the obtained subseq. If you also reverse complemented the subseq, used reverse=True. To define the chromosome attribute of the returned gene, use the chromosome=... keyarg All coordinates are 1 based """ new_g=gene(chromosome=chromosome) if not reverse: #subseq was cut normally, no rev comp new_g.strand = self.strand for start, end in self.exons: new_g.add_exon( start - subseq_start +1 , end-subseq_start+1 ) else: #subseq was cut and then rev comp if self.strand=='-': new_g.strand='+' else: new_g.strand='-' for start, end in self.exons: new_g.add_exon( subseq_end - end +1, subseq_end - start +1 ) return new_g def downstream(self, distance, region_length, **keyargs): """return a gene object with the region downstream this gene. vars: distance, region_length. distance=0 means the region is right downstream - starting from the very next base. Using a negative distance value, you will obtain a partial overlap with this gene (e.g. 
distance=-3 and region length=6 will return a gene object with the last codon of this gene plus the next codon) You can provide as keyargs any other attribute that you want to set in the resulting gene object. If you don't set the id, it will be set to its uniq_id in the python environment. """ g=gene(strand=self.strand, chromosome=self.chromosome, target=self.target) if self.exons: if self.strand=='+': g.add_exon(self.exons[-1][1]+1+distance, self.exons[-1][1]+1+distance+region_length-1) elif self.strand=='-': g.add_exon( self.exons[-1][0]-1-distance-region_length+1, self.exons[-1][0]-1-distance) for k in keyargs: g[k]=keyargs[k] if not g.id: g.id=str(uniq_id(g)) return g def upstream(self, distance, region_length, **keyargs): """see downstream function. """ g=gene(strand=self.strand, chromosome=self.chromosome, target=self.target) if self.exons: if self.strand=='+': g.add_exon(self.exons[0][0]-1-distance-region_length+1 ,self.exons[0][0]-1-distance) elif self.strand=='-': g.add_exon(self.exons[0][1]+1+distance, self.exons[0][1]+1+distance+region_length-1) for k in keyargs: g[k]=keyargs[k] if not g.id: g.id=str(uniq_id(g)) return g def is_upstream_of(self, other_g, max_overlap=0): """checks if gene self is upstream (not a single base overlap, unless max_overlap!=0) of other_g ; if the genes are not on the same strand, an exception is raised. It the output is "true", actually te distance between the two genes is reported. If the two genes are exactly adjacent, the distance reported is 1 (to avoid a 0 in a bool evaluation). If max_overlap is not 0, in case this rescues an evaluation , the n of bases in overlap are returned as a negative value. 
NOTE: the chromosome attribute is not checked """ if self.strand!=other_g.strand: raise Exception, "ERROR is_upstream_of function: genes are not on the same strand" if self.strand=='+': dist= other_g.exons[0][0]-self.exons[-1][1] elif self.strand=='-': dist= self.exons[-1][0]-other_g.exons[0][1] if dist<=0: if dist+max_overlap>0: return dist-1 else: return False else: return dist def is_downstream_of(self, other_g, max_overlap=0): """ see is upstream of""" return other_g.is_upstream_of(self, max_overlap=max_overlap) def extend(self, left=0, right=0, inplace=False, minimal=False, down=None, up=None): """ This function returns a copy of the self gene object with extended boundaries. The new coordinates could be out of the allowed range... it is suggested to run check_boundaries right after. If inplace==True, the gene is modified inplace and not returned. If down or up are specified, the direction of the extension depends on the strand """ if not down is None or not up is None: if down is None: down=0 if up is None: up=0 if self.strand=='+': return self.extend(left=up, right=down, inplace=inplace, minimal=minimal) elif self.strand=='-': return self.extend(left=down, right=up, inplace=inplace, minimal=minimal) if not self.exons: raise Exception, "gene->extend ERROR can't extend an empty gene! 
no exons were found for "+str(self.id) if inplace: g=self elif minimal: g=gene(chromosome=self.chromosome, strand=self.strand, target=self.target) g.exons=list(self.exons) else: g=self.copy() if g.strand=='+': g.exons[0] = [g.exons[0][0]-left, g.exons[0][1]] g.exons[-1]= [g.exons[-1][0], g.exons[-1][1]+right] elif g.strand=='-': g.exons[0]= [g.exons[0][0], g.exons[0][1]+right] g.exons[-1] = [g.exons[-1][0]-left, g.exons[-1][1]] if not inplace: return g def check_boundaries(self, chromosome_length=False): """ This function checks if the gene object has some illegal exons, that is to say, exons that spans regions which don't exist, either with positions <1 or > than the length of the chromosome. This must be provided as an argument, otherwise this fact is not checked. Illegal exons are removed when they are totally illegal, or set to legal positions when at least a small region of it is legal. A string message with the removed exon is returned if any was removed. Returns 0 if nothing has changed, 1 if some positions were changed, a string with details if some entire exons were removed """ modified=0 to_remove={} #keep the indices of the exons which are out of bound for exon_index in range(len(self.exons)): start, stop= self.exons[exon_index] if stop<1: to_remove[exon_index]=1 if start<1: self.exons[exon_index][0]=1 modified=1 if chromosome_length: if start>chromosome_length: to_remove[exon_index]=1 if stop>chromosome_length: self.exons[exon_index][1]= chromosome_length modified=1 if to_remove: msg='removed exons: ' for k in sorted(to_remove.keys()): msg+=str(k)+' ('+str(self.exons[k][0])+','+str(self.exons[k][1])+') ' self.remove_exon( to_remove.keys() ) return msg return modified def intersection_with(self, geneobj, check_phase=False): """This method returns another gene object, with only the intersection of the two gene object in input. 
The phase is considered only if check_phase is True""" out_gene=self.copy(); out_gene.exons=[]; out_gene.chromosome=None; out_gene.strand=None if not self.chromosome==geneobj.chromosome: return out_gene out_gene.chromosome=self.chromosome if not self.strand==geneobj.strand: return out_gene out_gene.strand=self.strand ov=self.overlaps_with(geneobj, full=True, phase=check_phase) if ov: for selfindex in range(len(ov)): if ov[selfindex]: for geneindex, start, stop in ov[selfindex]: out_gene.add_exon(start, stop) return out_gene def union_with(self, geneobj, check_phase=False, id='SUM', **keyargs): """This method returns another gene object, with the union of the two gene objects in input. The phase is considered to check overlap, only if check_phase is True. The variable id defines what the id of the result will be like. If it is "SUM", the id will be the results of joining the two original ids with "_+_". If id is "LONGEST", the resulting gene object will have the id of the longest of the joined gene object only (or the first one if length is the same). Lastly, if you defined an id which is not SUM or LONGEST, it becomes the id of resulting gene object. Also, this can be checked for all other key arguments given. (So you can define also program='SUM' or program='some_program') """ #print self.summary('BASE1') #print geneobj.summary('BASE2') out_gene=self.copy(); out_gene.exons=[] if not self.chromosome==geneobj.chromosome: return out_gene out_gene.chromosome=self.chromosome if not self.strand==geneobj.strand: return out_gene out_gene.strand=self.strand for start, end in self.exons + geneobj.exons: #print "cycling exon", start, end t_gene=gene(strand=self.strand, chromosome=self.chromosome) t_gene.add_exon(start, end) o=t_gene.overlaps_with(out_gene, phase=check_phase) if o: #the exon which I am about to add overlaps with something that I already added. 
Therefore, I must instead replace everything that overlaps with this with a new exon that includes them all exons_to_replace = o[0] #list of indices start, end = min( [out_gene[i][0] for i in o[0]]+[t_gene[0][0]] ) , max ([out_gene[i][1] for i in o[0]]+[t_gene[0][1]] ) #print "removed existing: ", o[0], '; adding: ', start, end out_gene.remove_exon(o[0]) #removing all overlapping exons in out_gene out_gene.add_exon(start, end) if not keyargs.has_key('id'): keyargs['id']=id for k in keyargs: if keyargs[k]=='SUM': out_gene[k]= str(self[k])+'_+_'+str(geneobj[k]) elif keyargs[k]=='LONGEST': if self.length()>=geneobj.length(): out_gene[k]= str(self[k]) else: out_gene[k]= str(geneobj[k]) else: out_gene[k]= keyargs[k] #print out_gene.summary('after union') return out_gene def subtracted_of(self, geneobj): """This returns a gene object, result of the self object subtracted of the second gene object. All overlapping regions are removed. """ out_gene=self.copy(); out_gene.exons=[] if not self.chromosome==geneobj.chromosome: return self out_gene.chromosome=self.chromosome if not self.strand==geneobj.strand: return self out_gene.strand=self.strand ov=self.overlaps_with(geneobj, full=True, phase=False) if not ov: return self else: for selfindex in range(len(ov)): if ov[selfindex]: for geneindex, start, stop in ov[selfindex]: if not self[selfindex][0]==start: out_gene.add_exon(self[selfindex][0], start-1) if not self[selfindex][1]==stop: out_gene.add_exon(stop+1, self[selfindex][1]) else: out_gene.add_exon( self[selfindex][0], self[selfindex][1] ) return out_gene def introns(self, minimal=False, skip_null_introns=False): """ Returns a gene object containing the introns of the self object. The self is copied so that all features (apart the genomic coordinates of exons) will be present in the returned gene. The id assigned will be equal to the id of self gene plus the flag "_introns". 
NOTE: if the self object has exons which are adjacent, an exception is raise; instead, it is tolerated (and these introns will just not be returned) if skip_null_introns=True """ if minimal: g=gene(chromosome=self.chromosome, strand=self.strand) else: g=self.copy() g.id=self.id+"_introns" g.exons=[] for exon_index in range(1, len(self.exons)): if self.strand=='+': intron_start = self.exons[exon_index-1][1]+1 intron_end = self.exons[exon_index] [0]-1 elif self.strand=='-': intron_start = self.exons[exon_index] [1]+1 intron_end = self.exons[exon_index-1][0]-1 if intron_start == intron_end+1: ## adjacent exons! if skip_null_introns: continue raise Exception, "gene->introns ERROR there are adjacent exons! cannot return a null intron!! Use skip_null_introns=True to tolerate this and skip these introns" g.add_exon(intron_start, intron_end) return g def bed(self, exon_index=None, show_id=True, strand=True, score=False, other_fields=[], sep='\t'): """Generic function to produce a bed from a gene object. E.g. default: (spacers are tab): chr4 9236903 9236953 BE_0001 0 + chr4 49110668 49110718 BE_0001 0 + By default (exon_index==None) this includes one line for each (start,end) in self.exons. If you provide a 0-based exon_index, it gives a single line for that certain exon. By default (show_id==True) the name in the bed out (fourth field) is printed and it is self.id; provide a string argument to show_id to print this as name instead. Provide show_id=False to show_id to show minimal bed (e.g. "chr4 9236903 9236953"). In this case, all following args are ignored. If strand is False, then only the first 4 fields in bed are printed, (minimal bed + name/id). If strand is True (default), at least the first 6 fields in bed are printed (up to strand including score), with score set to 0 unless provided as argument score. If strand==True, then other_fields can be provided, so each item in this list is converted to string and added at every line. 
By default output is tab-separated; use sep==' ' to produced space separated """ out='' if exon_index is None: considered_exons=self.exons else: try: considered_exons=self.exons[exon_index] except IndexError: raise Exception,"gene->bed ERROR exon index is invalid! {0} Gene: {1}".format(exon_index, self.header()) for start, end in considered_exons: out+='\n{1}{0}{2}{0}{3}'.format(sep, self.chromosome, start, end) if not show_id: break elif show_id==True: out+='{0}{1}'.format(sep, self.id) else: out+='{0}{1}'.format(sep, show_id) if strand and not score: score=True if not score: break else: if score==True: score=0 out+='{0}{1}'.format(sep, score) if not strand: break else: out+='{0}{1}'.format(sep, self.strand) for the_field in other_fields: out+='{0}{1}'.format(sep, the_field) return out[bool(out):] def gff(self, tag='', is_gtf=False, comment='', program='', sec_pos=False, id=None, score=None, position_features=[], last_field=None): """Generic function to produce a gff from a gene object. The options control the open fields. sec_pos was used for selenoprofiles < 3.1; position_features is required for >= 3.1. it is thought to add (one or few) single position features as comment in the same line of the exon they belong to. format: [[position, what_to_write], ... ] Last field overrides the id and the comment arguments """ if id is None: its_id=str(self.id) else: its_id=id if not tag: if not hasattr(self, 'tag'): tag='cds' else: tag=getattr(self, 'tag') if not program and self['program']: program=self['program'] elif not program: program='generic_program' if score is None: score_txt='.' elif type(score) in (str, float, int): score_txt=str(score) out='' for start, end in self.exons: add_sec_pos='' ################# ### deprecated if sec_pos: ##### note: this was added for selenoprofiles, but it is not used anymore sec_positions_list=[] if self.sec_pos: sec_positions_list=self.sec_pos # self.sec_pos is not used anymore. 
All the necessary information is already in the aminoacid sequence in the alignment attribute. So, I built a method available for p2ghit gene class that returns a position list as the self.sec_pos list was. else: try: sec_positions_list=self.sec_positions_list() except: pass for sec_position in sec_positions_list: ### deprecated if sec_position>= start and sec_position<= end: add_sec_pos+='Sec_position:'+str(sec_position)+' ' ################# if position_features: for pos, what_to_write in position_features: if pos>= start and pos<= end: add_sec_pos+=what_to_write+' ' if last_field is None: if is_gtf: last_field_txt='gene_id "'+its_id+ '"; transcript_id "'+its_id+'"; '+add_sec_pos+comment else: last_field_txt=its_id+' '+add_sec_pos+comment else: last_field_txt=last_field out+=self.chromosome+'\t'+program+'\t'+tag+'\t'+str(start)+'\t'+str(end)+'\t'+score_txt+'\t'+self.strand+'\t.\t'+last_field_txt+'\n' return out[:-1] def geneid_gff(self, program='', id=None, score=None, no_frame=False): """ Output a gff to be used with geneid option -R or -O score: None --> . is set ; which means: element is compulsory (like infinite score) float --> this value is set for all exons list ---> list of values, of exons, in order, upstream to downstream no_frame: False --> assume this gene is protein coding, so we derive the information of the frame and we feed it to geneid, True --> if this is not a protein coding gene structure, set frame column to ".", which is equivalent to set it to unknown """ if type(score)==list and len(score)!= len(self.exons): raise Exception, "gene->geneid_gff ERROR list of scores provided does not have exactly a score for each exon" if id is None: its_id=str(self.id) else: its_id=id if not program and self['program']: program=self['program'] elif not program: program='generic_program' out='' for index, element in enumerate(self.exons): start, end = element last_field=its_id if score is None: score_txt='.' 
elif type(score) in [float, int]: score_txt=str(round(score, 2)) elif type(score)==list: score_txt=str(round(score[index], 2)) else: raise Exception, "gene->geneid_gff ERROR score not accepted: "+str(score) if no_frame: frame_txt= '.' else: phase= self.phase_of(index) frame= (3-phase)%3 frame_txt=str(frame) if len(self.exons)==1: tag='Single' ######## finish! out+=self.chromosome+'\t'+program+'\t'+tag+'\t'+str(start)+'\t'+str(end)+'\t'+score_txt+'\t'+self.strand+'\t'+frame_txt+'\t'+last_field+'\n' return out[:-1] def extend_orf(self, chromosome_length=None, stops={'TGA':1, 'TAG':1, 'TAA':1}, starts={'ATG':1}, up=True, down=True, get_seq=lambda x:replace(upper(x.fasta_sequence()[1]),'U','T'), extension_parameter=1000, keep_seq=False): """ Extend a gene both upstream and downstream so that the first codon will be a start (the most upstream one without including a stop), and the last codon will be a stop. It returns a tuple like (nt_extended_upstream, nt_extended_downstream), how much was extended in the two directions. You must provide chromosome_length, which is the length of the sequence to which self.chromosome points to; this is used to check if during the extension, this boundary is passed. You can avoid providing this only in the case in which the variable chromosome_lengths is defined in MMlib and has self.chromosome as key. The allowed Starts and Stops can be provided as argument, in the form of any iterable (checked with "in"; use hashes for max speed). If you want to extend upstream to the next stop regardless of start codon, use starts={} The arguments up and down (True by default) control the direction in which the extension is attempted (upstream and/or downstream). The argument get_seq tells how the sequence of the gene object should be retrieved. 
There are at least two possibilities: - lambda x:x.fasta_sequence()[1] #default; the .target attribute of the gene object must be defined, and the split_folder variable must be available in MMlib (see set_local_folders) - lambda x:x.fast_sequence() #faster but more memory intensive; to call this, the full target sequence database must be loaded in memory with function load_sequence_db The argument extension_parameter tells how big are the chunks of sequence retrieved at once; this parameter is applied to both sides of The argument keep_seq, if True, sets the sequence of the new object as its .seq attribute before returning it. """ if chromosome_length is None: try: chromosome_length=chromosome_lengths[self.chromosome] except: raise Exception, "extend_orf ERROR you must provide chromosome_length as argument; this may be skipped if chromosome_lengths is defined in MMlib and has self.chromosome as key. But this chromosome was not found: " +str(self.chromosome) if not hasattr(self, 'original_bounds'): self.original_bounds=self.boundaries() allowed_letters='ATGC' ### extending and getting sequence. Doing it at once to avoid calling get_seq multiple times. 
big_extended_g= self.extend(right=extension_parameter, left=extension_parameter, inplace=False) big_extended_g.check_boundaries(chromosome_length) down= down and big_extended_g.downstream(0, 1).boundaries()[0] != self.downstream(0, 1).boundaries()[0] up= up and big_extended_g.upstream(0, 1).boundaries()[0] != self.upstream(0, 1).boundaries()[0] big_extended_seq= upper( get_seq(big_extended_g) ) if self.strand=='+': offset= self.boundaries()[0]-big_extended_g.boundaries()[0] extended_out_downstream= ( big_extended_g.boundaries()[1] - self.boundaries()[1] ) != extension_parameter elif self.strand=='-': offset= big_extended_g.boundaries()[1]-self.boundaries()[1] extended_out_downstream= ( self.boundaries()[0] - big_extended_g.boundaries()[0] ) != extension_parameter extended_out_upstream= offset!= extension_parameter #seq_g= big_extended_seq [offset:offset+ self.length() ] if up or down: last_codon=big_extended_seq [offset+self.length()-3:offset+self.length() ] while down and not last_codon in stops: # last codon is not stop codon: keep extending. out of boundaries or weird sequence cause it to break ## going downstream codon_downstream= big_extended_seq [offset+ self.length(): offset+self.length()+3] ### forcing to stop if letters are not standard (e.g. 
Ns) if not all ( [c in allowed_letters for c in codon_downstream] ): down=False; break if len(codon_downstream)!=3: break self.extend(down=3, inplace=True) last_codon=codon_downstream ## first we extend up to the stop, then we subseq first_codon = big_extended_seq [offset:offset+3] while up and not first_codon in stops: codon_upstream= big_extended_seq [offset-3:offset] if not all ( [c in allowed_letters for c in codon_upstream] ): up=False; break if len(codon_upstream)!=3: break self.extend(up=3, inplace=True) offset-=3 first_codon=codon_upstream rerun_up=False; rerun_down=False if down and not last_codon in stops and not extended_out_downstream \ and abs(big_extended_g.downstream(0, 1).boundaries()[0] - self.downstream(0, 1).boundaries()[0]) <3: rerun_down=True ## we extended to the limit if up and not first_codon in stops and not extended_out_upstream \ and abs(big_extended_g.upstream(0, 1).boundaries()[0] - self.upstream(0, 1).boundaries()[0]) <3: rerun_up=True ## we extended to the limit if rerun_up or rerun_down: #print self # print "rerun", rerun_up, rerun_down return self.extend_orf(chromosome_length, stops=stops, starts=starts, get_seq=get_seq, up=rerun_up, down=rerun_down, extension_parameter=extension_parameter, keep_seq=keep_seq) ### cut to first start if starts: first_start=None for codon_index in range(self.length()/3): codon= big_extended_seq [ offset+codon_index*3:offset+codon_index*3+3 ] if codon in starts: first_start=codon_index; break if not first_start is None: if self.strand=='+': actually_reducing= self.boundaries()[0]+first_start*3 > self.original_bounds[0] elif self.strand=='-': actually_reducing= self.boundaries()[1]-first_start*3 < self.original_bounds[1] if not actually_reducing: ## cutting here new_coords_g=self.subseq( first_start*3 + 1 ) offset+=first_start*3 self.exons=list(new_coords_g.exons) elif up: ## we went up to the closest stop but found no Methionine in this upstream extension. 
Let's go back to the original 5' boundary if self.strand=='+': offset+=self.original_bounds[0]-self.exons[0][0]; self.exons[0][0]=self.original_bounds[0] elif self.strand=='-': offset+=self.exons[0][1]-self.original_bounds[1]; self.exons[0][1]=self.original_bounds[1] if keep_seq: self.seq= big_extended_seq [offset:offset+ self.length() ] if self.strand=='+': extended_up = self.original_bounds[0] - self.boundaries()[0]; extended_down = self.boundaries()[1] - self.original_bounds[1] elif self.strand=='-': extended_up = self.boundaries()[1] - self.original_bounds[1]; extended_down = self.original_bounds[0] - self.boundaries()[0] del self.original_bounds return extended_up, extended_down def header(self, no_id=False, no_species=False, no_target=False, no_chromosome=False, no_strand=False, compress=False, max_exons_uncompressed=5, **other_fields): """ This returns a header with all information about the gene... it can be used as a one line description. As keyed arguments, you can provide attributes of the gene that you want to be included in the header. Example, if the gene X has a .program attribute with value "blast", you can call the function X.header(program=1) and the string " program:blast " will be included in the header, at the end. You can also specify function that must be called instead of attributes. In this case just specify a field called like function_xxx , and the function xxx will be called and the string xxx:output will be included in the header. 
No arguments can be specified for such functions """ species_string='' if bool(self.species and not no_species): species_string=str(self.species) if ' ' in species_string: species_string='"'+species_string+'"' species_string=' species:'+species_string out=(str(self.id)+" ")*int(not no_id) + ("chromosome:"+self.chromosome+' ')*int(not no_chromosome) +("strand:" +self.strand+' ')*int(not no_strand)+'positions:'+self.positions_summary(compress, max_exons_uncompressed) +species_string+ (' target:'+str(self.target))*int(bool(self.target and not no_target)) for k in other_fields: if k.startswith('function_'): f=k.split('function_')[1] try: out+=' '+f+':'+str(getattr(self, f)()) except: printerr("gene->header ERROR searching/executing function: "+str(f)+' in gene with id '+str(self.id), 1) raise else: try: out+=' '+k+':'+str(self[k]) except: raise Exception, "gene->header ERROR can't find field: "+str(k)+' in gene with id '+str(self.id) return out def load_from_header(self, header_line): """ Reverse of last function... to load a gene back from there.""" while header_line[0] in ['#', '>']: header_line=header_line[1:] #removing trailing characters so it can be used on fasta headers or comment lines. 
self.__init__() if 'chromosome:' in header_line: self.chromosome=header_line.split('chromosome:')[1].split()[0] if 'strand:' in header_line: self.strand=header_line.split('strand:')[1].split()[0] self.add_exons_from_positions_summary( header_line.split('positions:')[1].split()[0] ) if 'species:' in header_line: species_string=header_line.split('species:')[1].split()[0] if species_string[0]=='"': species_string=header_line.split('species:"')[1].split('"')[0] if species_string!='None': self.species=species_string if 'target:' in header_line: target_string=header_line.split('target:')[1].split()[0] if target_string!='None': self.target=target_string first_word= header_line.split()[0] if not (first_word.startswith('chromosome:') or first_word.startswith('strand:') or first_word.startswith('positions:') or first_word.startswith('species:') or first_word.startswith('target:')) : self.id=header_line.split()[0 ] def positions_summary(self, compress=False, max_exons_uncompressed=5): """ Returns a string with a summary of the positions of the exons in this gene object. Example: 123-145,265-300,454-565,666-678,988-999 If compress == True, this summary is compressed and limited to certain exons, whose number is decided by: max_exons_uncompressed Example compress==True, max_exons_uncompressed=4 --> 123-145,265-300,454-565,...,988-999 """ out='' if self.exons: if compress and len(self.exons) > max_exons_uncompressed: for start, stop in self.exons[:max_exons_uncompressed-1]: out+=str(start)+'-'+str(stop)+',' out+='...,'; out+=str(self.exons[-1][0])+'-'+str(self.exons[-1][1])+',' else: for start, stop in self.exons: out+=str(start)+'-'+str(stop)+',' out=out[:-1] #removing last , return out def add_exons_from_positions_summary(self, pos_summ): """ Read an uncompressed positions summary once produced by the above function and add to the self object all exons in it. 
If the summary is compressed, an exception is raised """ for piece in pos_summ.split(','): if piece=='...': raise Exception , "gene->add_exons_from_positions_summary ERROR can't load exons from a compressed positions summary!" start= int(piece.split('-')[0]) ; end=int(piece.split('-')[1]) self.add_exon(start, end) def fasta_title(self): """ Analog to header, but it is used to generate a single word with all the information to load back the gene after, and be able to understand which portion of which chromosome was exactly written in a fasta file. This is useful since some programs keep only the first word of the fasta header. """ return self.chromosome+'[positions:'+self.positions_summary() +'][strand:'+self.strand+']' def load_from_fasta_title(self, title): """ Reverse of last function... to load a gene back from there.""" self.chromosome = title.split('[positions:')[0] self.strand = title.split('[strand:')[1].split(']')[0] self.add_exons_from_positions_summary(title.split('[positions:')[1].split(']')[0]) def fast_sequence(self): """ Can be used only if the target file for this gene is loaded in memory in the sequence_db object (see function load_sequence_db). provides a much faster way to get sequences than method fasta_sequence. returns string with the sequence """ if not sequence_db.has_key(self.chromosome): raise Exception, "ERROR fast_sequence() cannot find chromosome identifier: "+str(self.chromosome) seq_out='' if self.strand=='-': for start, end in reversed(self.exons): seq_out+= sequence_db[self.chromosome] [ start-1:end ] seq_out= reverse_complement(seq_out) else: for start, end in self.exons: seq_out+= sequence_db[self.chromosome] [ start-1:end ] if len(seq_out)!=self.length(): raise Exception, "ERROR fast_sequence() wrong sequence length in memory! aborting " return seq_out def fasta_sequence(self, to_file='', target='', chromosome_file='', split_exons=False, title=''): """ Cut the sequence corresponding to this gene object. 
If to_file is defined, nothing is returned, and the subseqed sequence is written to "to_file" argument. ; if it is not defined, a single (title, seq) object is returned. target can be defined as key arg to override the target attribute of the gene object. It should point to the genome (multifasta or not) file. if split_exons is True, then multiple exons will be present in the output as fata entries (so, if split_exons and to_file is not defined, a list of (title, seq) is returned instead of a single entry. if you don't define title, this will be computed with the function header. If title is 'fasta_title', the method fasta_title is used to determine the title of the output fasta. If split_exons is True, then the word "_EXON" plus the index is added to the FIRST WORD of the title (defined as argument or not). """ if not chromosome_file and not target and not self.target : if 'reference_genome_filename' in globals() and is_file(reference_genome_filename): target=reference_genome_filename else: raise Exception, "gene-> fasta_sequence ERROR the target is not defined " elif not chromosome_file and not target: target=self.target if chromosome_file: if not is_file(chromosome_file): raise Exception, "gene-> fasta_sequence ERROR the chromosome_file "+chromosome_file+" was not found" chrom_file=chromosome_file else: chrom_file=fastafetch(split_folder, self.chromosome, target) if not to_file: file_out=temp_folder+'gene_fasta_seq' else: file_out=to_file bbash('>'+file_out) if title=='fasta_title': title=self.fasta_title() elif not title: title=self.header() if not split_exons: write_to_file(">"+title, file_out) for start, stop in self.exons: if self.strand=='+': fastasubseq(chrom_file, start-1, stop-start+1, file_out, pipecommand=" | gawk 'NR>1' >") #appending elif self.strand=='-': fastasubseq(chrom_file, start-1, stop-start+1, temp_folder+'gene_fasta_seq_to_revcomp') bbash("fastarevcomp "+temp_folder+"gene_fasta_seq_to_revcomp "+"| gawk 'NR>1' "+" >> "+file_out) else: for 
exon_index in range(len(self.exons)): start, stop = self.exons[exon_index] this_title=title.split()[0]+'_EXON'+str(exon_index+1) +' '*int( len(title.split())>1 )+ join(title.split()[1:], ' ') bbash('echo ">'+this_title+'" >> '+file_out) if self.strand=='+': fastasubseq(chrom_file, start-1, stop-start+1, file_out, pipecommand=" | gawk 'NR>1' >") #appending elif self.strand=='-': fastasubseq(chrom_file, start-1, stop-start+1, temp_folder+'gene_fasta_seq_to_revcomp') bbash("fastarevcomp "+temp_folder+"gene_fasta_seq_to_revcomp "+"| gawk 'NR>1' "+" >> "+file_out) if not to_file: if not split_exons: return parse_fasta(file_out).all()[0] else: return parse_fasta(file_out).all() def intersection_of(range1, range2): """assuming they are overlapping, and that they're like [start, stop] with start < stop""" return [ max(range1[0], range2[0]), min(range1[1], range2[1]) ] uniq_idfunctions_hash={'gtf': lambda s:s.split('transcript_id "')[1].split('"')[0], 'selenoprofiles1_gff': lambda s:s.split('\t')[1], 'selenoprofiles2_gff':lambda s:s.split('\t')[8].split()[0], 'gff':lambda s:s.strip().split('\t')[-1].split()[0]} def get_gff_format(gff_file): gff_format='' check_file_presence(gff_file) sample_lines=bbash('head -15 '+gff_file) if 'transcript_id "' in sample_lines: gff_format='gtf' elif '\tSP.' in sample_lines: gff_format='selenoprofiles1_gff' elif 'selenoprofiles' in sample_lines: gff_format='selenoprofiles2_gff' elif gff_file.endswith('.gff'): gff_format='gff' if gff_format: return gff_format else: raise Exception, "ERROR unknown gff format for file: "+gff_file def load_all_genes(gff_file, tag='cds', get_id='', add=None, is_sorted=False, **load_gff_args): """ load and returns all genes from a gff file, determining which line belong to which gene using the function get_id, given as input. This function must take a line as input and return its id, which is the same for lines describing the same gene object to be loaded. 
If not provided, it uses defaults function which depend on the extension of the file loaded Argument add can be provided: a function that takes the string of gff lines and the gene object, and may manipulate the gene object reading information from the lines. Normally the function makes no assumptions on the distribution of the lines belonging to the same entry (gene) in the file. If the file is sorted (the lines of the same gene entry are consecutive) you can specify is_sorted=True to make the function faster and less memory expensive. You can specify additional keyword arguments to control the behaviour of the load_gff function used to load the gff lines. available arguments are (see gene.load_gff documentation): check_sec_pos=True, keep_program=False, parse_keywords=False """ #I changed the name of the keyarg in this function. This is to work with old programs if 'uniqid_function' in load_gff_args: uniqid_function=load_gff_args['uniqid_function'] del load_gff_args['uniqid_function'] else: uniqid_function=get_id ### default if not uniqid_function: uniqid_function=uniq_idfunctions_hash[get_gff_format(gff_file)] genes=[] cfile=open(gff_file, 'r') if not is_sorted: #more memory expensive, but handles better any gff id2lines_list={} for line in cfile: line=line.strip() #;print [line] if line and not line[0]=="#" and (tag=='*' or lower(line.split('\t')[2]) == lower(tag) ): the_id=uniqid_function(line) if not id2lines_list.has_key(the_id): id2lines_list[the_id]='' id2lines_list[the_id]+=line +'\n' for the_id in sorted( id2lines_list.keys() ): gff_lines= id2lines_list[the_id] g=gene() g.load_gff( gff_lines, tag, **load_gff_args) g.id=the_id if not add is None: add(gff_lines, g) genes.append(g) else: ### old code. not proud. 
and: it can't work if the lines referring to the same entry are not consecutive current_id='' new_id=None gff_lines='' cline=cfile.readline() while cline: try: if not cline[0]=="#": if tag=='*' or lower(cline.split('\t')[2]) == lower(tag): new_id=uniqid_function(cline) if current_id!= new_id: if current_id and gff_lines: #if it is not the first entry, append to the output list the gene we just finished parsing g=gene() g.load_gff(gff_lines, tag, **load_gff_args) g.id=current_id if not add is None: add(gff_lines, g) genes.append(g) gff_lines='' current_id=new_id gff_lines+=cline except Exception, e: print "ERROR loading gff line: "+cline+" ### "+str(e) raise cline=cfile.readline() #last entry if current_id and gff_lines: #if it is not the first entry, append to the output list the gene we just finished parsing g=gene() g.load_gff(gff_lines, tag, **load_gff_args) g.id=current_id if not add is None: add(gff_lines, g) genes.append(g) cfile.close() return genes def order_genes_for_strand_chr_pos(x, y): """Order fucntion for genes: by strand, chromosome, positions start """ if x.strand!=y.strand: return (x.strand+' '+y.strand).index('+')-1 if x.chromosome!=y.chromosome: return cmp(x.chromosome, y.chromosome) else: return cmp(x.boundaries()[0], y.boundaries()[0]) order_genes_to_merge=order_genes_for_strand_chr_pos def order_genes_for_chr_strand_pos(x, y): if x.chromosome!=y.chromosome: return cmp(x.chromosome, y.chromosome) if x.strand!=y.strand: return (x.strand+' '+y.strand).index('+')-1 else: return cmp(x.boundaries()[0], y.boundaries()[0]) def order_genes_for_chr_pos(x, y): if x.chromosome!=y.chromosome: return cmp(x.chromosome, y.chromosome) else: return cmp(x.boundaries()[0], y.boundaries()[0]) try: from pygraph.classes.graph import graph from pygraph.algorithms.accessibility import connected_components class gene_overlap_graph(graph): """ Class to store overlaps between gene classes. 
Can't be initialized manually, it's only returned by function genes_overlap """ def are_overlapping(self, g1, g2): """ Tell if gene1 and gene2 (arguments) are overlapping, meaning, are connected in the graph""" return g2 in self.node_neighbors[g1] def all_overlapping_with(self, g1): """ Return the list of genes overlapping with argument g1""" return self.node_neighbors[g1] def overlap_clusters(self, min_size=1, sort=True): """ returns a list of lists of genes overlapping each other. the genes are not required to be all overlapping to each other (e.g. g1 overlaps g2 ; g2 overlaps g3; [ g3 do not overlap g1] --> [g1, g2, g3] will be returned Cluster minimal size is two. output list is sorted to have biggest clusters first, unless sort==False """ cc= connected_components(self) ## e.g .{a: 1, b: 1, c: 1, d: 1, e: 2, f: 2} if min_size>=2: ## removing clusters of size 1 for g in self.nodes: if not self.all_overlapping_with(g): del cc[g] ## here cc has already at least two elements with same label, meaning: gene not overlapping with anything are not here ##### transforming label hash such as cc in lists of elements to output connected_comp_lists_hash={} for k in cc: if not connected_comp_lists_hash.has_key( cc[k] ): connected_comp_lists_hash[ cc[k] ]= [] connected_comp_lists_hash[ cc[k] ].append( k ) out=connected_comp_lists_hash.values() if min_size>=3: out = [v for v in out if len(v)>=min_size] if sort: out.sort(key=len) return out except: pass def bedtools_intersect(gene_list, strand=True, options={}): ## implicit:{'s':True} if strand==True """ Open a temp file preparing the input to bedtool intersect, runs it and returns a filehandler on the result. The file is provided to bedtools both as input A and input B, to compute all against all overlaps. Basic commadnline executed: #bedtool intersect -a ALL_GENES.fa -b ALL_GENES.fa -wa -wb Options are passed as key of the dict options. When the value to a key is boolean True, that option has no argument. 
In any other case, the value is converted to string. E.g. bedtools_intersect(gene_list, strand=True, options={'sorted':True, 'f':0.5 } --> bedtool intersect -a ALL_GENES.fa -b ALL_GENES.fa -wa -wb -f 0.5 -sorted -s in output each gene is identified by its index in the list, e.g. chr4 9236903 9236953 11 0 + chr4 9236903 9236944 12 0 + ### 11 and 12 are the ids """ global temp_folder; temp_overlap_file= temp_folder+'gene_overlap_file.bed' test_writeable_folder(temp_folder, 'temp folder ! Not defined maybe? [Use set_local_folders(temp_folder)]' ) temp_overlap_file_h= open(temp_overlap_file, 'w') for g_index, g in enumerate(gene_list): print >> temp_overlap_file_h, g.bed( show_id = str(g_index), strand=strand ) temp_overlap_file_h.close() bedtools_intersect_command= "bedtools intersect {1} -wa -wb -a {0} -b {0} ".format(temp_overlap_file, {True:'-s', False:''}[bool(strand)]) for k in options.keys(): bedtools_intersect_command+='-'+str(k)+' ' if not options[k]==True: bedtools_intersect_command+=str(options[k])+' ' # print bedtools_intersect_command try: bp= bash_pipe(bedtools_intersect_command, return_popen=True) except IOError: #raise Exception, printerr("bedtools_intersect ERROR bedtools not installed??", 1) raise return bp #bbash(bedtools_intersect_command) # return bp def gene_clusters(gene_list, strand=True): """ Use bedtools intersect (temp_folder from MMib is used. See function set_local_folders) to compute overlaps between the genes. 
Returns two dictionaries: gene2cluster, cluster2genes where cluster is a numeric index (not consecutives) """ geneid2cluster_index={}; cluster_index=1; cluster_index2geneids={} geneid2gene={}; for g_index, g in enumerate(gene_list): geneid2gene[str(g_index)]=g bp=bedtools_intersect(gene_list, strand=strand) for line in bp.stdout: # print line splt=line.rstrip().split('\t') if strand: id_left = splt[3]; id_right= splt[9] else: id_left = splt[3]; id_right= splt[7] if id_left != id_right: if ( not id_left in geneid2cluster_index ) and ( not id_right in geneid2cluster_index ): #new cluster cluster_index2geneids [cluster_index] = [id_left, id_right] geneid2cluster_index[id_left]=cluster_index; geneid2cluster_index[id_right]=cluster_index cluster_index+=1 # print '1 creating with ids: ',id_left, id_right, ' the cluster ', cluster_index-1 elif ( id_left in geneid2cluster_index ) and ( not id_right in geneid2cluster_index ): cluster_index2geneids [ geneid2cluster_index[id_left] ].append( id_right ) geneid2cluster_index[id_right]= geneid2cluster_index[id_left] # print '2 moving id: '+id_right+' to cluster ', cluster_index elif ( not id_left in geneid2cluster_index ) and ( id_right in geneid2cluster_index ): cluster_index2geneids [ geneid2cluster_index[id_right] ].append( id_left ) geneid2cluster_index[id_left]= geneid2cluster_index[id_right] # print '3 moving id: '+id_left+' to cluster ', cluster_index elif geneid2cluster_index[id_left] != geneid2cluster_index[id_right] : #not id_left in geneid2cluster_index and not id_right in geneid2cluster_index is implicit #putting those in cluster of id_right into the cluster in id_left, unless the reverse is more efficient if len( cluster_index2geneids [ geneid2cluster_index[id_right] ] ) > len( cluster_index2geneids [ geneid2cluster_index[id_left] ] ): id_left, id_right = id_right, id_left # print '4 LEFT:', id_left, 'c:', geneid2cluster_index[id_left], 'RIGHT:', id_right, 'c:', geneid2cluster_index[id_right] cluster_index_to_remove 
= geneid2cluster_index[id_right] for gid in cluster_index2geneids [ geneid2cluster_index[id_right] ]: # print '4 moving id: '+gid, 'from cluster', cluster_index_to_remove, ' to cluster ', geneid2cluster_index[id_left] geneid2cluster_index[ gid ] = geneid2cluster_index[id_left] cluster_index2geneids[ geneid2cluster_index[id_left] ].extend( cluster_index2geneids[ cluster_index_to_remove ] ) # print '4 removing cluster', cluster_index_to_remove del cluster_index2geneids[ cluster_index_to_remove ] # producing dictionaries that can be used in output gene2cluster={}; cluster2genes={} for gid in geneid2gene: g=geneid2gene[gid] if gid in geneid2cluster_index: gene2cluster[ g ] = geneid2cluster_index[gid] else: gene2cluster[ g ] = cluster_index; cluster_index+=1 if not gene2cluster[ g ] in cluster2genes: cluster2genes[ gene2cluster[ g ] ]= [] cluster2genes[ gene2cluster[ g ] ].append(g) return gene2cluster, cluster2genes def remove_overlapping_gene_clusters(gene_list, scoring=len, cmp_fn=None, phase=False, strand=True, out_removed_genes=[], remember_overlaps=False, verbose=False, fix_ties=True, overlap_fn=None ): """ Returns a reduced version of the list in input, removing genes that overlaps. When two genes overlap, a score is assigned to each gene to decide which to keep -- similarly to the key argument to sort, you can provide a function as argument of scoring. by default, it's the gene lenght. Alternatively, you can use cmp_fn in a similar fashion to cmp in sort. It must accept two gene arguments, and return -1 if you want to keep the first argument, +1 if you want to keep the last one. Important: when you use cmp_fn, take care of ties! don't let python decide, or your results may not be reproducible. when scoring is chosen, this is avoided with a trick here, which adds very small quantities (max 0.001) to the scores of each gene object which depend on their ids. 
So, when using scoring, use integer scores or anyway make that in no case two gene objects must differ for more than 0.001! When you have complex overlap structures, this is what happens: -clusters of overlapping genes are built. In this cluster though, you may have pairs of genes not overlapping, but overlapping to something that overlaps the other (or even more far fetched than that) --> e.g g1 overlaps g2 ; g2 overlaps g3; [ g3 do not overlap g1] --> a cluster is [g1, g2, g3] -for each cluster, initially the best scoring gene is taken (this will be output), and all things really overlapping with it are thrown away. The procedure is repeated until no gene is left in this cluster. This will ensure that you have no overlaps in the output genes, and neither that you will take off genes without having something really overlapping with it in output The list out_removed_genes may be used to collect the genes removed from the input list. To use it, initialize an empty list variable, then pass it as this argument: # e.g. a_list=[] remove_overlapping_genes(gene_list, out_removed_genes=a_list) # now a_list contains the gene removed. When you use the out_removed_genes argument, you may want to know the correspondance between the genes removed and the ones kept, without recomputing overlaps. If you use remember_overlaps=True, the attribute .overlapping will be added to the removed genes; this is a link to the gene kept (which is present in the output, returned list) Normally the overlaps between any two genes is checked through two steps; first, the bedintersect tool, which can take into account the strand or not (depending on the argument of strand); second, the gene.overlaps_with function, which can take into account also the phase (frame). You can replace this second check with any given function providing it as argument of overlap_fn; this must take two gene arguments, and return True or False. If overlap_fn is provided, then the phase argument is ignored. 
""" outlist=[] #overlaps_graph= genes_overlap(gene_list, phase=phase, strand=strand) #clusters= overlaps_graph.overlap_clusters(min_size=1) gene2cluster, cluster2genes = gene_clusters( gene_list, strand=strand ) for cluster_id in cluster2genes.keys(): cluster= cluster2genes[cluster_id] #cluster is a gene list ## we do the following: we take the best scoring gene, we put this in the outlist, we throw away everything that overlaps with it, and we repeat until we finished the cluster while cluster: if cmp_fn: cluster.sort(cmp=cmp_fn) else: #I'm not giving directly scoring to sort because this may lead to different results in different runs, because of ties ( only if fix_ties is true ) hash_obj_to_score={} for obj in cluster: score=scoring(obj) if fix_ties: score+= string_hashed_to_number( str(obj.id), 0.001 ) hash_obj_to_score[obj]= score #print obj, score cluster.sort(key= lambda x:hash_obj_to_score[x], reverse=True) best_gene= cluster[0] genes_not_overlapping_best_gene=[] outlist.append(best_gene) for g in cluster: if g!=best_gene: they_overlap = best_gene.overlaps_with(g, phase=phase, strand=strand) if overlap_fn is None else overlap_fn(best_gene, g) if not they_overlap: ## gene that we're keeping for next cycle genes_not_overlapping_best_gene.append(g) else: ## genes that we're throwing away if remember_overlaps: g.overlapping= best_gene if verbose: printerr('removing: '+g.id+' --> keeping: '+best_gene.id, 1) out_removed_genes.append(g) cluster= genes_not_overlapping_best_gene return outlist #gs=load_all_genes('/users/rg/mmariotti/Archaea/ivan_dotu/aSeblastian/janna_all_methods.knownsp.output.all_secis.gff', tag='secis') #get_gene_overlaps(gs) def genes_overlap(gene_list, phase=False, strand=True): try: overlaps_graph= gene_overlap_graph() except NameError: raise ImportError, "ERROR pygraph modules not installed! 
can't initialize subclass gene_overlap_graph " list_no_empty=[g for g in gene_list if g] overlaps_graph.add_nodes(list_no_empty) #ordered_gene_list= sorted( list_no_empty, cmp=order_genes_for_chr_strand_pos ) ### old: doesn't work if strand==False ordered_gene_list= sorted( list_no_empty, cmp=order_genes_for_chr_pos ) for index1, g1 in enumerate(ordered_gene_list): right_boundary_g1=g1.boundaries()[1] index2=index1+1 while index2<len(ordered_gene_list) and ordered_gene_list[index2].boundaries()[0] <= right_boundary_g1: if g1.overlaps_with( ordered_gene_list[index2], phase=phase, strand=strand ): overlaps_graph.add_edge( (g1, ordered_gene_list[index2]) ) index2+=1 return overlaps_graph def remove_overlapping_genes(gene_list, scoring=len, cmp_fn=None, phase=False, strand=True, out_removed_genes=[], remember_overlaps=False, verbose=False, fix_ties=True): """ Returns a reduced version of the list in input, removing genes that overlaps. When two genes overlap, a score is assigned to each gene to decide which to keep -- similarly to the key argument to sort, you can provide a function as argument of scoring. by default, it's the gene lenght. Alternatively, you can use cmp_fn in a similar fashion to cmp in sort. It must accept two gene arguments, and return -1 if you want to keep the first argument, +1 if you want to keep the last one. Important: when you use cmp_fn, take care of ties! don't let python decide, or your results may not be reproducible. when scoring is chosen, this is avoided with a trick here, which adds very small quantities (max 0.001) to the scores of each gene object which depend on their ids. So, when using scoring, use integer scores or anyway make that in no case two gene objects must differ for more than 0.001! When you have complex overlap structures, this is what happens: -clusters of overlapping genes are built. 
In this cluster though, you may have pairs of genes not overlapping, but overlapping to something that overlaps the other (or even more far fetched than that) --> e.g g1 overlaps g2 ; g2 overlaps g3; [ g3 do not overlap g1] --> a cluster is [g1, g2, g3] -for each cluster, initially the best scoring gene is taken (this will be output), and all things really overlapping with it are thrown away. The procedure is repeated until no gene is left in this cluster. This will ensure that you have no overlaps in the output genes, and neither that you will take off genes without having something really overlapping with it in output The list out_removed_genes may be used to collect the genes removed from the input list. To use it, initialize an empty list variable, then pass it as this argument: # e.g. a_list=[] remove_overlapping_genes(gene_list, out_removed_genes=a_list) # now a_list contains the gene removed. When you use the out_removed_genes argument, you may want to know the correspondance between the genes removed and the ones kept, without recomputing overlaps. 
If you use remember_overlaps=True, the attribute .overlapping will be added to the removed genes; this is a link to the gene kept (which is present in the output, returned list) """ outlist=[] overlaps_graph= genes_overlap(gene_list, phase=phase, strand=strand) clusters= overlaps_graph.overlap_clusters(min_size=1) for cluster_index, cluster in enumerate(clusters): #cluster is a gene list ## we do the following: we take the best scoring gene, we put this in the outlist, we throw away everything that overlaps with it, and we repeat until we finished the cluster while cluster: if cmp_fn: cluster.sort(cmp=cmp_fn) else: #I'm not giving directly scoring to sort because this may lead to different results in different runs, because of ties ( only if fix_ties is true ) hash_obj_to_score={} for obj in cluster: score=scoring(obj) if fix_ties: score+= string_hashed_to_number( str(obj.id), 0.001 ) hash_obj_to_score[obj]= score #print obj, score cluster.sort(key= lambda x:hash_obj_to_score[x], reverse=True) best_gene= cluster[0] genes_not_overlapping_best_gene=[] outlist.append(best_gene) for g in cluster: if g!=best_gene: if not overlaps_graph.are_overlapping(best_gene, g): ## gene that we're keeping for next cycle genes_not_overlapping_best_gene.append(g) else: ## genes that we're throwing away if remember_overlaps: g.overlapping= best_gene if verbose: printerr('removing: '+g.id+' --> keeping: '+best_gene.id, 1) out_removed_genes.append(g) cluster= genes_not_overlapping_best_gene return outlist from hashlib import md5 def string_hashed_to_number(astring, n_max=1.0): """ convert a string to a hashed number ranging from 0 to n_max (def:1) """ m = md5(astring) number= int( m.hexdigest(), 16 ) #getting the number corresponding to the hex code returned by the md5 hashing functino higher_possible_number= int( 'f'*32 , 16 ) #highest possible number is all 'f' chars, and the string returned by hex digest is 32 chars long out= number*float(n_max) / higher_possible_number return out def 
merge_genes(gene_list, phase=False, inplace=False, mode='merge', id_mode='SUM', program_mode='LONGEST', removed_genes=[], remember_overlaps=False, strand=True, strict_overlap=False): """ This functions accepts a list of gene objects, and it merges them (in place, if inplace==True): overlapping genes (checked with the overlaps_with function, in case with phase) are merged. If mode=="merge", when two genes overlap, they're replaced by their union. The gene.id gets the name of the two ids joined by "_+_" (this behavior can be changed with id_mode... see union_with function in gene class. If mode=="longest", only the longest prediction among the overlapping ones is kept, and it is not modified. As mode, one can also provide a function which has to accept two gene object arguments and return a new one. In this case, id_mode and program_mode are ignored removed genes accepts a list which is modified inplace to contain to list of removed genes If the variable remember_overlaps is set to True, the genes returned in the removed_genes list contain an new attribute, .overlapping, which points to the overlapping gene which was kept When two genes are merged, the one which is kept will keep a list of the genes that were removed in its favor, and the overlaps will be computed also on this list of genes which it represents. For this reason, this may happen: g1 overlaps with g2 (and has priority over it). g2 overlaps with g3 but not with g1. --> g1 will be kept and g2 and g3 are removed -- even if g1 was not overlapping with g3. This method ensure consistency of results. anyway, if you want to turn this off, use strict_overlap=True. in this case, the overlaps are not computed with represented genes """ if not strand: raise Exception, "ERROR this function cannot be used to merge genes on different strands! Please convert all of them to + and rerun." 
if not mode in ['merge', 'longest'] and type(mode)!=type(lambda x:x): #the mode can also be a function telling which gene to keep when two overlap raise Exception, "merge_genes function, mode not recognized : "+str(mode) exon_list=[] #### I build an exon list which I can order by position start, so then I can exploit the fact the only adiacent exons can overlap. for g_index, g in enumerate(gene_list): for st, end in g.exons: e=gene(chromosome=g.chromosome, strand=g.strand) e.add_exon(st, end) e.id=g.id e.index=g_index #In every exon, I add a ".index" attribute to keep track from which gene it comes from exon_list.append(e) gene_list[g_index].index=g_index #then, I add the same attribute to the gene, to know its position in the gene list without navigating the whole list. gene_list[g_index].representing_genes=set() #index collection. Every time two genes g1 and g2 are found overlapping and g1 is kept but g2 is not, the g1.representing_genes will contain the index of g2, remembering that now g1 represents also g2 exon_list.sort(order_genes_to_merge) #now I cycle through each exon. At each cycle, there's one ruler, and the next one is checked. Anyway, since these are gene objects, I check the overlap of the full genes, instead of the single exons. NB: If gene1 and gene2 are overlapping, then one of the two is added a ".merged_in" attribute, which basically means, ignore this result and see gene n. (merged_in propriety) instead. 
for index_ruler in range(len(exon_list)-1): current_exon=exon_list[index_ruler]; next_exon=exon_list[index_ruler+1] gene1= gene_list[current_exon.index]; gene2=gene_list[next_exon.index] #dereferencing: if the exon belong to a gene which is been merged, consider the linked gene instead while not gene1['merged_in'] is None: #print "delink1: "+gene1.id +' --> '+str(gene1['merged_in'])+' '+gene_list[gene1.merged_in].id gene1=gene_list[gene1.merged_in] while not gene2['merged_in'] is None: #print "delink2: "+gene2.id +' --> '+str(gene2['merged_in'] )+' '+ gene_list[gene2.merged_in].id gene2=gene_list[gene2.merged_in] if gene1.index!=gene2.index: genes_overlaps=False if mode=='merge': genes_overlaps=gene1.overlaps_with( gene2, phase=phase, strand=strand ) else: # if mode != merge, then we must check if (any gene represented by) gene1 overlaps with any gene represented by gene2. Break statements are to optimize for g1_ind in set( [ gene1.index ] ) | gene1.representing_genes : if genes_overlaps: break for g2_ind in set( [ gene2.index ] ) | gene2.representing_genes: if genes_overlaps: break if gene_list[g1_ind].overlaps_with(gene_list[g2_ind], phase=phase, strand=strand): genes_overlaps=True # if gene1.overlaps_with( gene2, phase=phase ): #################### replacing one of the two genes if genes_overlaps: #print 'merging '+gene1.id+ ' '+gene2.id #for frid in gene1.id.split('_+_'): # if '_+_'+frid+'_+_' in gene2.id: # raise Exception, "time to die! 
"+frid if mode=='merge': gene_list[gene1.index] = gene1.union_with( gene2, id=id_mode, program=program_mode, index=gene1.index ) #with the union gene_list[gene2.index].merged_in = gene1.index elif mode=='longest': #with the longest of the two genes if gene1.length()>=gene2.length(): #print "longest is "+gene1.id+' (against '+gene2.id+') ; linking '+gene2.id +' to '+str(gene1.id) gene_list[gene2.index].merged_in = gene1.index else: #print "longest is "+gene2.id+' (against '+gene1.id+') ; linking '+gene1.id +' to '+str(gene2.id) gene_list[gene1.index].merged_in = gene2.index elif type(mode)==type(lambda x:x): #print "merging g1 ("+gene1.id+','+str(gene1.index)+') with g2 ('+gene2.id+','+str(gene2.index)+')', g = mode( gene1, gene2 ) ## evaluating which of the two overlapping genes should be kept #print ' --> '+g.id ################################################# #print 'merged_in', gene_list[gene1.index]['merged_in'], gene_list[gene2.index]['merged_in'], , gene2==g # may return gene1, gene2 or a new gene. if it is gene1 or gene2, I put this control to be sure that the .merged_in attribute is never pointing to itself. if gene2==g: #keeping gene2 g=g.copy(); g.index=gene2.index for gene_index in gene_list[gene1.index].representing_genes | set([gene1.index]): gene_list[gene_index].merged_in=g.index if not strict_overlap: g.representing_genes.add(gene_index) gene_list[gene2.index] = g gene_list[gene1.index].representing_genes=set() else: g=g.copy(); g.index=gene1.index for gene_index in gene_list[gene2.index].representing_genes | set([gene2.index]): gene_list[gene_index].merged_in=g.index if not strict_overlap: g.representing_genes.add(gene_index) gene_list[gene1.index] = g gene_list[gene2.index].representing_genes=set() list_out=[]; removed_gene_indexes=[] # unlinked_list=[] #will contain all the genes to reconsider, since : gene1 was merged into a gene2 that was merged into gene3 but now gene1 does not overlap with gene3. 
it is possible only if mode != merge for g_index in range(len(gene_list)): del gene_list[g_index].index #deleting non.std propriety if gene_list[g_index]['merged_in']!=None : #outputing all those which are not merged_in something else. # if mode!='merge': # #checking that the gene in which this was merged is really overlapping this gene. If the mode is not merge, it may not. In this case, we have to make a subsequent analysis # final_gene=gene_list[ gene_list[g_index]['merged_in'] ] # while final_gene['merged_in']!=None: # final_gene=gene_list[final_gene['merged_in']] # if not gene_list[g_index].overlaps_with(final_gene, phase=phase): # gene_list[g_index]['merged_in']=None # unlinked_list.append(gene_list[g_index]) final_gene=gene_list[ gene_list[g_index].merged_in ] if remember_overlaps: gene_list[g_index].overlapping= final_gene if bool(gene_list[g_index].representing_genes): raise Exception, "merge don't work" removed_genes.append(gene_list[g_index]) removed_gene_indexes.append(g_index) del gene_list[g_index]['merged_in'] else: if not inplace: list_out.insert(0, gene_list[g_index]) if inplace: gene_list.pop(g_index) #print "removing ", g_index del gene_list[g_index]['representing_genes'] # if unlinked_list: unlinked_list=merge_genes( unlinked_list, phase=phase, inplace=False, mode=mode, id_mode=id_mode, program_mode=program_mode ) if inplace: remove_items_from_list_with_indexes(gene_list, removed_gene_indexes, inplace=True) return gene_list else: return list_out def get_species_from_library(species_name, library=False): """ Parse the file species_library and returns a list: [taxid, scientific name] for the species name provided. This can differ from the name in output is the title provided is listed under the synonyms of the actual species name. If the species name is not found, returns None. It accepts also species names with masked characters as {chXX}, and also detects those names in which underscore means white space. 
For this method to work, the variable species_library must be accessible, and contain a string pointing to a names.dmp file as downloaded from ncbi taxonomy. Alternatively this file can be provided with the keyword argument library. """ if '_' in species_name and not ' ' in species_name: species_name=replace_chars(species_name, '_', ' ') species_name=unmask_characters(species_name) if not library: library=species_library cmnd='gawk -v input="'+species_name+'"'+""" -F"\\t" '{ if ($7=="scientific name"){ sc_name=$3; sc_name_taxid=$1}; if ($3 == input){ taxid=$1 }; if (taxid && taxid==sc_name_taxid){print taxid "\\t" sc_name; exit} }' """+library b=bash(cmnd) if b[0]: raise notracebackException, "ERROR searching species. something wrong in command: "+cmnd if not b[1]: return None else: return b[1].split('\t') def get_taxids_from_ncbi_db( scientific_name_list, ncbi_db='/users/rg/didac/NCBI/Taxonomy/names.dmp', temp_dir=None, silent=False, full=False ): """ Utility to get the taxids for a (high) number of scientific names. Input is: list of scientific names (not synonyms!), path to names.dmp file of ncbi_taxonomy, temporary folder (or temp_folder of MMlib will be used) and silent flag. If ambygous entries are present or some name is not found, warning are printed to screen, unless silent==True. Normally, an hash with {scientific_name -> taxid} is returned. If full is set to True, a tuple is returned like: out_hash, ambigous_hash, not_found_hash out_hash is like normal output. ambigous_hash collects the taxids of ambigous entries, like {scientific_name -> [taxid1, taxid2, .. ]}. 
not_found_hash collects the scientific names that were not found, like: {scientific_name -> 1 } NOTE: taxids returned are of type int """ if temp_dir is None: temp_dir = temp_folder t_file= temp_dir+'/sc_names_w_tab.txt' ; t_file_h= open(t_file, 'w') for sc_name in scientific_name_list: print >> t_file_h, "|\t"+sc_name+"\t|" t_file_h.close() grep_out= temp_dir+'/grep_out' bash( 'grep -Ff '+t_file+' ' +ncbi_db+' > '+grep_out) ambygous= temp_dir+'/ambygous_out' unambygous= temp_dir+'/unambygous_out' bash('rm '+ambygous) bash("gawk -F'|' '{if ($3!=\"\t\t\"){print $0 > \""+ambygous+"\"} else{ print } }' "+grep_out + " > "+unambygous) if is_file(ambygous): n_ambygous_entries = int( bash('wc -l '+ambygous)[1].split()[0] ) n_sc_names_ambygous = int( bash(" gawk -F'|' '{print $2}' "+ambygous+" | sort | uniq | wc -l " )[1].split()[0] ) else: n_ambygous_entries, n_sc_names_ambygous= 0, 0 n_unambygous_entries = int( bash('wc -l '+unambygous)[1].split()[0] ) if not silent and n_ambygous_entries: printerr("get_taxids_from_ncbi_db WARNING "+str(n_sc_names_ambygous)+ " scientific names in input are ambygous: a total of "+str(n_ambygous_entries)+ " entries were found for these. 
Excluding them from output...", 1 ) n_sc_names_found= n_sc_names_ambygous+ n_unambygous_entries if not silent and n_sc_names_found != len(scientific_name_list): printerr("get_taxids_from_ncbi_db WARNING "+str( len(scientific_name_list) - n_sc_names_found )+ " scientific names were not found!", 1 ) out_hash={} for line in open(unambygous): splt = line.split('\t') out_hash[ splt[2] ] = int(splt[0]) if full: ambigous_hash={} if n_ambygous_entries: for line in open(ambygous): splt = line.split('\t') if not ambigous_hash.has_key(splt[2]): ambigous_hash[ splt[2] ] = [] ambigous_hash[splt[2]].append( int(splt[0]) ) not_found_hash={} for n in scientific_name_list: if not ( n in out_hash or n in ambigous_hash): not_found_hash[n]=1 return out_hash, ambigous_hash, not_found_hash return out_hash class parser(object): """ This class handles reading from a text file, typically a sequence file. IT is meant to be a parent class for parsers. Usage: (example with parse_fasta) p=parse_fasta(file) #--> this opens the file, prepares a filehandler ; NOTE: a filehandler can be also provided instead of a file for object in p: #--> the next() method is used until a StopIteration is raised. Inside it, the method parse_next() is run. This is different for each parser metaclass, and returns object print object # [title, sequence] Example of parse_next (of parse_fasta parser): def parse_next(self): title=self.last_line[1:-1] seq='' self.last_line=self.file.readline() # ---> read a new line, stores it in self.last_line while self.last_line and self.last_line[0]!='>': # ---> when self.last_line is false (end of the file), next() will raise StopIteration instead of calling parse_next() seq+=replace_chars(self.last_line, ' \n\r\t', '') self.last_line=self.file.readline() return title, seq To build a new parser, define a child class of this superclass and define a parse_next method which parse self.last_line or, in case, the next lines and returns a desired object. 
""" def __getitem__(self, name): if name in dir(self): return self.__dict__[name] else: return None def __setitem__(self, key, value): self.__dict__[key]=value def __init__(self, filename='', **keyargs): for key in keyargs: self[key]=keyargs[key] if filename: self.load(filename) def load(self, filename=''): if not filename and self.file and type(self.file) == file: raise Exception, "parser ERROR: trying to load a filehandler which has already finished. Please instanciate another parser" if type(filename)==file: self.load_filehandler(filename) else: #string self.load_filename(filename) def load_filename(self, filename=''): if not filename: filename=self.file.name check_file_presence(filename, 'filename') self.file= open(filename, 'r') self.last_line=self.file.readline() def load_filehandler(self, fileh=''): self.file=fileh self.last_line=self.file.readline() def __iter__(self): return self def all(self): outlist=[] for i in self: outlist.append(i) if outlist==[None]: return [] return outlist def next(self, skip_comments=True): if self.file.closed: self.load() if not self.last_line: self.stop() if skip_comments and not ( self.__dict__.has_key('skip_comments') and not self['skip_comments'] ) : while self.last_line and (self.last_line[0]=='#'): self.last_line=self.file.readline() try: return self.parse_next() except StopIteration: raise except Exception, e: print "ERROR parsing file: "+self.file.name+' ; line: '+self.last_line raise def stop(self): self.file.close() raise StopIteration def parse_next(self): """This method is the key of the parser class and must be implemented for each parser. The method should read self.last_line and next line (in case it is necessary). 
Before returning the desired object, it should move the cursor self.last_line to the next line """ raise Exception, "ERROR the generic parser class has no parse_next method: you must define it in the metaclass" class parse_fasta(parser): remove_chars=set(['\n', '\r', '\t', ' ']) def parse_next(self): title=del_white(self.last_line[1:-1]) seq='' self.last_line=self.file.readline() while self.last_line and self.last_line[0]!='>': seq+=replace_chars(self.last_line, self.remove_chars, '') self.last_line=self.file.readline() return title, seq class parse_sam(parser): """ Returns a gene object for each line of the sam input. nornmally just positions, strand, chromosome, id and sequence are kept. if you want other attributes, define them as attributes of this parser object. You can have these attributes: 'flag', 'mapq', 'cigar', 'rnext', 'pnext', 'qual'] ( see SAM1 manual) Example, to have the qualities: p=parse_sam( somefile ) p.qual=True for g in p: print g.id, g.qual # --> the qualities are stored in the .qual attribute """ def parse_next(self): #write(self.last_line, 1, how='blue') splt= self.last_line.split('\t') qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual = splt[:11] flag, pos, mapq, pnext, tlen= int(flag), int(pos), int(mapq), int(pnext), int(tlen) g=gene(chromosome=rname, strand='+') g.add_exon( pos, pos+len(seq)-1 ) g.seq=seq g.id=qname for i in ['flag', 'mapq', 'cigar', 'rnext', 'pnext', 'qual']: if hasattr(self, i) and getattr(self, i): exec('g.'+i+' = '+i) self.last_line=self.file.readline() return g #class parse_gff(parser): # def parse_next(self): class rnazhit(object): """ This class read input from RNAz output as channeled by the program rnazWindows.pl run on clustalw format nucleotide alignments """ features_names=["Sequences", "Columns", "Reading direction", "Mean pairwise identity", "Shannon entropy", "G+C content", "Mean single sequence MFE", "Consensus MFE", "Energy contribution", "Covariance contribution", "Combinations/Pair", 
"Mean z-score", "Structure conservation index", "Background model", "Decision model", "SVM decision value", "SVM RNA-class probability", "Prediction"] def __init__(self, rnaz_string=''): self.data={} #stores every numeric feature in the header of rnaz output, which is like: Mean z-score: -0.09 # we index by the key: "Mean z-score" -- value is converted to approprioate type e.g. -0.09 -> float self.ali=alignment(); self.title2ss={}; self.title2zscore={}; self.title2mfe={}; self.title2code={}; if rnaz_string: self.load(rnaz_string) def load(self, rnaz_string): """ parse a string coming from parsing a rnaz output file""" self.__init__() #resetting data line_index=0; lines= rnaz_string.split('\n') ##parsing "key: value" file header while line_index<len(lines) and not lines[line_index].strip().startswith("Sequences"): line_index+=1 for line in lines[line_index:]: splt=line.split(':') if len(splt)<2: break self.data[splt[0].strip()]= option_value( splt[1].strip() ) ##checking for missing values err_msg='' for key in self.features_names: if not self.data.has_key(key): err_msg+=key+', ' if err_msg: raise Exception, "rnazhit load ERROR feature"+int(err_msg.count(',')>1)*"s"+" not found: "+err_msg[:-2] ### parsing alignment while line_index<len(lines) and not lines[line_index].strip().startswith(">"): line_index+=1 title, seq, ss= None, None, None for line in lines[line_index:]: if line.startswith(">"): title=line.strip()[1:] elif title and not seq: seq =line.strip() elif title and seq and not ss: ### line is now: # ..(((--))..)..ETCETERA..((.-))... 
( -447.90, z-score = -0.85, S) ss =line.split()[0] self.ali.add(title, seq) self.title2ss[title]= ss if title!='consensus': self.title2zscore[title]=float(line.split("z-score = ")[1].split(",")[0]) self.title2mfe[title]=float(line.split(",")[0].split()[-1]) self.title2code[title]=line.rstrip().split()[-1][0] # R or S title, seq, ss= None, None, None if line.startswith("#"): break #checking evertyhing is there n=self.ali.nseq()-1 if n!= len(self.title2zscore) or n!= len(self.title2mfe) or n!=len(self.title2code) or n!=self.data['Sequences']: print self.summary() raise Exception, "rnazhit load ERROR some data was not found for all "+str(self.data['Sequences']) def positions(self): """ if RNAz was run channeled through rnazWindows (if not, return None) , returns two indexes which are found in the sequence titles; indexes are python style, 0 for first and end not included. e.g. (0, 120) """ try: return map(int, self.titles()[0].split('/')[-1].split('-')) except: return None def __nonzero__(self): return bool(self.data) def probability(self): if not self: return None return self.data["SVM RNA-class probability"] def MFE(self): if not self: return None return self.data["Consensus MFE"] def zscore(self): if not self: return None return self.data["Mean z-score"] def titles(self): return self.ali.titles() def seq_of(self, t): return self.ali.seq_of(t) def summary(self): """returns the (almost) same string parsed to generate this hit""" if not self: return None o='############################ RNAz ?.? 
##############################\n\n' for k in self.features_names: o+=' '+k+': '+str(self.data[k])+'\n' o+='\n######################################################################\n\n' for t in self.titles(): o+='>'+t+'\n'+self.seq_of(t)+'\n'+self.title2ss[t]+'\n' return o __repr__= summary class parse_rnaz(parser): """ Parse a rnazhit object at each next() call""" def load(self, filename=''): if not filename: filename=self.file.name check_file_presence(filename, 'filename') self.file=open(filename, 'r') self.last_line=self.file.readline() while self.last_line and not (self.last_line.startswith("#") and "RNAz" in self.last_line): self.last_line=self.file.readline() #set first line to ############################ RNAz 2.1 ############################## def parse_next(self): rnaz_string='' self.last_line=self.file.readline() # skipping line ####(..)#### RNAz so we can parse until the next one. if there's none, self.last_line will be set on false and this method won't be called anymore while self.last_line and not (self.last_line.startswith("#") and "RNAz" in self.last_line): rnaz_string+=self.last_line self.last_line=self.file.readline() return rnazhit(rnaz_string) class estexoneratehit(gene): """ puppet class to manage est2genome exonerate predictions""" def gff(self): return gene.gff(self, tag='exon', program='exonerate') class parse_exonerate_est(parser): """ Parse an exonerate file and returns a exoneratehit object for each prediction inside. The target names obtained by fastasubseq are recognized and set to absolute coordinates. Also the target names obtained through the method from gene class "fasta_sequence", with title set to "fasta_title", are recognized (they are used in selenoprofiles). 
""" def load(self, filename=''): if not filename: filename=self.file.name check_file_presence(filename, 'filename') self.file = open(filename, 'r') self.last_line=self.file.readline() allowed_chars={'A':1, 'G':1, 'T':1, 'C':1, 'N':1, '-':1} def parse_next(self): line=self.last_line; cfile=self.file #necessary to recycle code: this below is the old exonerate parser # reading inputfile cont_b=0 while line and line !='-- completed exonerate analysis\n': full_query_block=''; full_target_block=''; ali_target_seq=''; ali_query_seq=''; while line and line !='-- completed exonerate analysis\n' and line.split(':')[0]!=" Target range": if line!='\n' and line.split()[0]=='Target:': full_target_name=del_white(line[:-1].split('Target:')[1]) line=cfile.readline() if line and line !='-- completed exonerate analysis\n': skip_intron=False passed_the_N_of_intron=False line=cfile.readline() line=cfile.readline() while line and line.split(':')[0]!='vulgar': align_block=[] # reading 4 lines-alignment blocks for i in range(4): align_block.append(line) line=cfile.readline() ali_block_start= align_block[0].find(':') ali_block_end = align_block[0].rfind(':') query_block= align_block[0] [ ali_block_start+2:ali_block_end-1 ] target_block= align_block[2] [ ali_block_start+2:ali_block_end-1 ] full_query_block += query_block full_target_block += target_block if len(full_query_block)!=len(full_target_block): raise Exception, "ERROR parsing exonerate_dna ! 
different lengths of full_query_block and full_target_block: \n"+full_query_block+'\n'+full_target_block for index, q_char in enumerate( full_query_block ): if q_char in self.allowed_chars and full_target_block[index] != '.': ali_query_seq += q_char ali_target_seq += full_target_block[index] vulgar_line=line qname=vulgar_line.split()[1] ; qstart=int(vulgar_line.split()[2]) + 1 ; qend=int(vulgar_line.split()[3]) tname=vulgar_line.split()[5]; real_tstart=int(vulgar_line.split()[6]) +1 ; real_tend=int(vulgar_line.split()[7]) if real_tstart> real_tend: #negative frame real_tstart = real_tstart-1 real_tend = real_tend+1 raw_score=vulgar_line.split()[9] ali=join(vulgar_line.split()[9:], ' ') line=cfile.readline() # processing considering subseqing #now in GFF comment startline if line=="# --- START OF GFF DUMP ---\n": while line[0]=='#': line=cfile.readline() #now on actual gff line gff_lines='' while line[0]!='#': gff_lines+=line line=cfile.readline() else: raise Exception, 'ERROR no gff ouput in the exonerated file; please run exonerate with --showtargetgff option' while line and line !='-- completed exonerate analysis\n' and line !='C4 Alignment:\n': line=cfile.readline() e=estexoneratehit() e.load_gff(gff_lines, tag='exon') e.alignment=alignment() e.alignment.add('q', ali_query_seq ); e.alignment.add('t', ali_target_seq ) e.query=gene(chromosome=qname, strand='+') e.query.add_exon(qstart, qend) e.id='est_exonerate:'+str(uniq_id(e)) e.query.id=str(uniq_id(e))+'_query' e.score=int(raw_score) if ':subseq(' in tname: strand_subseq='+' if '[revcomp]' in full_target_name: strand_subseq='-' start_subseq=int(tname.split(':subseq(')[1].split(',')[0] ) # 0 based length_subseq=int(tname.split(':subseq(')[1].split(')')[0].split(',')[1] ) e.chromosome=tname.split(':subseq(')[0] subseq_gene=gene(chromosome=tname, strand=strand_subseq) subseq_gene.add_exon(start_subseq+1, start_subseq+length_subseq) e.restore_absolute_coordinates(parent_gene=subseq_gene) elif '[positions:' in 
tname: subseq_gene=gene() subseq_gene.load_from_fasta_title(tname) e.chromosome=tname.split('[positions:')[0] e.restore_absolute_coordinates(parent_gene=subseq_gene) if line=='-- completed exonerate analysis\n': line='' self.last_line=line if e.length()!= len(nogap(e.alignment.seq_of('t'))): printerr('WARNING error in the exonerate file: '+(self.file.name)+' ; the gff does not have a perfect correspondence with the alignment displayed. To avoid crash, returning None like if it were an empty exonerate prediction', 1) return None return e class infernalhit(gene): """This class manages the predictions by infernal rna search program. The output accepted is the one by cmsearch """ def __init__(self, infile=''): self.ss='' self.query=gene() self.score=None self.evalue=None self.alignment=alignment() self.cmali=None gene.__init__(self) if infile: self.load(infile) def load(self, infile): """ accepts a file with a single hit """ self.__dict__ = parse_infernal(infile).all()[0] def summary(self): """ """ out='CM: '+str(self.query.chromosome)+'\n' out+='>'+self.chromosome+'\n\n' out+=' Strand = '+self.strand+'\n' out+=" Query = "+str(self.query.boundaries()[0])+' - '+str(self.query.boundaries()[1])+', Target = ' if self.strand=='+': out+=str(self.boundaries()[0])+' - '+str(self.boundaries()[1])+'\n' else: out+=str(self.boundaries()[1])+' - '+str(self.boundaries()[0])+'\n' out+=" Score = "+str(self.score) if not self.evalue is None: out+=", E = "+str(self.evalue) out+='\n\n' out+=' '*12+self.ss+'\n' out+=' '*12+self.alignment.seq_of('q')+'\n' out+='\n' #consensus line... 
too lazy to do it out+=' '*12+self.alignment.seq_of('t')+'\n' out+='\n' return out def abstract_line(self): """One line with all the information for the infernal """ out=self.query.chromosome+' '+self.chromosome+' '+self.strand+' TPOS:'+self.positions_summary()+' QPOS:'+self.query.positions_summary()+' S:'+str(self.score)+' E:'+str(self.evalue)+' '+self.alignment.seq_of('q')+' '+self.alignment.seq_of('t')+' '+self.ss return out def load_abstract_line(self, line): self.__init__() splt=line.split() self.query.chromosome=splt[0]; self.chromosome=splt[1]; self.strand=splt[2]; self.add_exons_from_positions_summary( splt[3].split('TPOS:')[1] ) self.query.add_exons_from_positions_summary( splt[4].split('QPOS:')[1] ); self.score= float(splt[5].split('S:')[1]); e=splt[6].split('E:')[1] if e!='None': self.evalue = e_v(e) self.alignment.add('q', splt[7] ); self.alignment.add('t', splt[8] ); self.ss = splt[9] def remove_Xs(self, gene_seq=None): """This function is for the infernal hits which contain insertions, kept as Xs in the virtual infernalhit object. When run, it interrogates the target file specified in .target and recovers the missing sequence. 
""" if not self.target: raise Exception, "infernalhit -> remove_Xs ERROR the .target attribute is not defined" if not gene_seq is None: gene_seq= replace( lower( gene_seq ), 't', 'u') if "x" in self.alignment.seq_of('t'): gaps_target=0 for pos in range(self.alignment.length()): nt=self.alignment.seq_of('t')[pos] if nt=='-': gaps_target+=1 elif nt=='x': p=pos while p<len(self.alignment.seq_of('t')) and self.alignment.seq_of('t')[p]=='x': p+=1 x_range_start = pos x_range_end = p-1 break x_range = self.subseq( x_range_start-gaps_target+1, x_range_end-x_range_start+1 ) if gene_seq is None: subseq_from_target= replace( lower( x_range.fasta_sequence()[1]), 't', 'u') else: subseq_from_target= gene_seq [x_range_start-gaps_target:x_range_end+1-gaps_target] if len(subseq_from_target)!=x_range.length(): raise Exception, "infernalhit->remove_Xs ERROR the sequence fetched for this hit has wrong length: subseq from target: {0} range analyzed: {1} ".format(len(subseq_from_target), x_range.length()) new_seq_target = self.alignment.seq_of('t')[:x_range_start]+subseq_from_target+self.alignment.seq_of('t')[x_range_end+1:] self.alignment.set_sequence('t', new_seq_target ) return self.remove_Xs(gene_seq=gene_seq) if "x" in self.alignment.seq_of('q') and not self.cmali is None: gaps_query=0 for pos in range(self.alignment.length()): nt=self.alignment.seq_of('q')[pos] if nt=='-': gaps_query+=1 elif nt=='x': p=pos while p<len(self.alignment.seq_of('q')) and self.alignment.seq_of('q')[p]=='x': p+=1 x_range_start = pos x_range_end = p-1 rf_no_insertions=replace(self.cmali.rf, '.', '') pos_in_rf=self.query.boundaries()[0]+ len( replace(self.alignment.seq_of('q')[:x_range_start], '-', '') )-1 #this var is 0 based rf_subseq= rf_no_insertions[pos_in_rf:pos_in_rf+x_range_end-x_range_start+1] new_seq_query=self.alignment.seq_of('q')[:x_range_start]+rf_subseq+self.alignment.seq_of('q')[x_range_end+1:] self.alignment.set_sequence('q', new_seq_query ) return self.remove_Xs(gene_seq=gene_seq) def 
get_pairs(self, unaligned=False): """returns the list of pairs in the target, as 0 based positions in the alignment (or in the target sequence if unaligned=True) """ pairs_in_model = ss_pairs( self.ss ) out=[ [first, second] for first, second in pairs_in_model if not ( self.alignment.seq_of('t') [first] == '-' or self.alignment.seq_of('t') [second] == '-' ) ] if unaligned: out=[ [self.alignment.position_in_seq('t', first+1)-1, self.alignment.position_in_seq('t', second+1)-1] for first, second in out ] return out def sequence(self): return upper(nogap(self.alignment.seq_of('t'))) def RNApackage_ss(self): """ return a fasta like string with sequence in target and secondary structure""" a=alignment() press=self.ss ss='' for char in press: if char in '<([{': ss+='(' elif char in '>)]}': ss+=')' else: ss+='.' seq=upper(self.alignment.seq_of('t')) find_index=seq.find('-') while find_index!=-1: # we're removing one character at a time: find_index seq=seq[:find_index]+seq[find_index+1:] ss=press[:find_index]+ss[find_index+1:] find_index=seq.find('-') return ">"+self.header()+'\n'+seq+'\n'+ss def ss_pairs(ss_string): """ Given a string with secondary structure -- in which pairs are represented by any parenthesis ([{< -- returns a list of tuples with 0 based positions of each pair, from 5' to 3'""" height = 0; stem = {}; pairs = []; # pos_rf=0; pos_t=0 for pos, ss_char in enumerate(ss_string): if ss_char in '([{<': stem[ height ] = pos height += 1 elif ss_char in ')]}>' and height > 0: height -= 1 if height < 0: break paired_pos = stem[height] pairs.append( (paired_pos, pos) ) return sorted(pairs, key = lambda x: x[0]) class parse_infernal(parser): """ Parse a cmsearch output (infernal rna search package) and return infernalhit objects at each next() call. 
It tries to identify the infernal version by parsing the first commented lines and look for something like: # INFERNAL 1.1.1 (July 2014) If your output does not have this, you can force version with this: the_parser = parse_infernal( your_file ) the_parser.infernal_version= '1.1' #for example for hit in the_parser: #now this should work if version is correct #do stuff with hit """ def load(self, filename=''): if not filename: filename=self.file.name check_file_presence(filename, 'filename') self.file=open(filename, 'r') self.current_cm='unknown' self.current_chromosome='unknown' self.infernal_version=None self.last_line=self.file.readline() while self.last_line and self.last_line.startswith('# '): if self.last_line.startswith('# INFERNAL '): self.infernal_version=self.last_line.split()[2][:3] self.last_line=self.file.readline() def parse_next(self): if self.infernal_version == '1.0': return self.parse_next_ver1_0() elif self.infernal_version == '1.1': return self.parse_next_ver1_1() else: raise Exception, "ERROR infernal version not recognized or supported: "+str(self.infernal_version) def parse_next_ver1_1(self): while self.last_line and not self.last_line.startswith('>>'): self.last_line=self.file.readline() if self.last_line.rstrip() and self.last_line.startswith('>>'): self.current_chromosome=self.last_line.rstrip().split()[1] self.last_line=self.file.readline() hit_lines = [] while self.last_line and not self.last_line.startswith('>>'): hit_lines.append( self.last_line.rstrip() ) self.last_line=self.file.readline() elif not self.last_line.rstrip(): self.stop() g=infernalhit() g.chromosome=self.current_chromosome scores = hit_lines[2].rstrip().split() g.evalue = e_v(scores[2]) g.score = float(scores[3]) query_start = int(scores[6]) query_end = int(scores[7]) target_start = int(scores[9]) target_end = int(scores[10]) g.strand = scores[11] hand_rf = '' if '-' in g.strand: target_start, target_end = target_end, target_start g.add_exon(target_start, target_end) 
query_seq=''; target_seq='' while hit_lines: current_line = hit_lines[0] if current_line.endswith('CS'): g.ss += hit_lines[0].split()[0].rstrip('CS') self.current_cm = hit_lines[1].split()[0] query_seq += ' '.join( hit_lines[1].split()[2:-1] ) target_seq += ' '.join( hit_lines[3].split()[2:-1] ) ## new, thanks to didac: del hit_lines[0:4] for next_line in hit_lines: if next_line.endswith('RF'): hand_rf += ' '.join( next_line.split()[0:-1] ) break del hit_lines[0:] ## if hit_lines: del hit_lines[0] g.query.chromosome = self.current_cm g.query.strand='+' g.query.add_exon(query_start, query_end) while "*" in query_seq: pos_insert=query_seq.find('*') insert_length_target= int( target_seq.split('[')[1].split(']')[0].strip() ) insert_length_query= int( query_seq.split('[')[1].split(']')[0].strip() ) if scores[8].startswith('~'): scores[8]='' query_seq = insert_length_query*'x' + query_seq[pos_insert+1:] target_seq = insert_length_query*'-' + target_seq[pos_insert+1:] g.ss = '~'*max([insert_length_query, insert_length_target]) + g.ss.lstrip('~') if hand_rf: hand_rf = '~'*max([insert_length_query, insert_length_target]) + hand_rf else: query_seq = query_seq.split('*')[0]+ insert_length_query*'x'+"-"* (insert_length_target-insert_length_query) + join( query_seq.split('*')[2:], '*' ) target_seq=target_seq.split('*')[0]+insert_length_target*'x'+"-"*(insert_length_query-insert_length_target) + join( target_seq.split('*')[2:], '*' ) l_ss_gap=0 while len(g.ss)>pos_insert+l_ss_gap and g.ss[pos_insert+l_ss_gap]=='~': l_ss_gap+=1 g.ss = g.ss[:pos_insert]+'~'*max([insert_length_query, insert_length_target])+g.ss[pos_insert+l_ss_gap:] if hand_rf: hand_rf = hand_rf[:pos_insert]+'~'*max([insert_length_query, insert_length_target])+hand_rf[pos_insert+l_ss_gap:] g.alignment.add('q', replace(query_seq, '.', '-')) g.alignment.add('t', replace(target_seq, '.', '-')) g.hand_rf = hand_rf return g def parse_next_ver1_0(self): while self.last_line and not self.last_line.startswith('>') and not 
self.last_line.startswith('CM:') and not self.last_line.strip().startswith('Query') : self.last_line=self.file.readline() if self.last_line.startswith('CM'): self.current_cm= self.last_line.rstrip().split('CM: ')[1] while self.last_line and not self.last_line.startswith('>') and not self.last_line.strip().startswith('Query'): self.last_line=self.file.readline() if self.last_line.startswith('>'): self.current_chromosome=self.last_line.rstrip()[1:] while self.last_line and not self.last_line.strip().startswith('Query') : self.last_line=self.file.readline() if not self.last_line: self.stop() #now line is like: Query = 1 - 86, Target = 9 - 89 query_start = int(self.last_line.split('Query =')[1].split('-')[0].strip()) query_end = int(self.last_line.split('Query =')[1].split(',')[0].split('-')[1].strip()) target_start= int(self.last_line.split('Target =')[1].split('-')[0].strip()) target_end = int(self.last_line.split('Target =')[1].split('-')[1].strip()) g=infernalhit() g.chromosome=self.current_chromosome if target_start<target_end: g.strand='+' else: g.strand='-' target_start, target_end = target_end, target_start g.add_exon(target_start, target_end) g.query.chromosome = self.current_cm g.query.strand='+' g.query.add_exon(query_start, query_end) self.last_line=self.file.readline() #now : Score = 22.84, GC = 51 OR : Score = 35.67, E = 6.512e-06, P = 5.57e-12, GC = 40 g.score= float( self.last_line.split(',')[0].split()[-1] ) if " E = " in self.last_line: g.evalue=e_v( self.last_line.split(',')[1].split()[-1] ) self.last_line=self.file.readline() self.last_line=self.file.readline() query_seq=''; target_seq='' while self.last_line.strip(): #two empty lines will make terminate this cycle. This is the signal for: end of this hit. 
#now in ss g.ss+=self.last_line.strip() self.last_line=self.file.readline() query_seq+= join(self.last_line.split()[1:-1], '') self.last_line=self.file.readline() self.last_line=self.file.readline() target_seq+=join(self.last_line.split()[1:-1], '') self.last_line=self.file.readline() self.last_line=self.file.readline() while "*" in query_seq: pos_insert=query_seq.find('*') insert_length_target= int( target_seq.split('*[')[1].split(']')[0].strip() ) insert_length_query= int( query_seq.split('*[')[1].split(']')[0].strip() ) query_seq= query_seq.split('*')[0]+ insert_length_query*'x'+"-"* (insert_length_target-insert_length_query) + join( query_seq.split('*')[2:], '*' ) target_seq=target_seq.split('*')[0]+insert_length_target*'x'+"-"*(insert_length_query-insert_length_target) + join( target_seq.split('*')[2:], '*' ) l_ss_gap=0 while len(g.ss)>pos_insert+l_ss_gap and g.ss[pos_insert+l_ss_gap]=='~': l_ss_gap+=1 g.ss = g.ss[:pos_insert]+'~'*max([insert_length_query, insert_length_target])+g.ss[pos_insert+l_ss_gap:] g.alignment.add('q', replace(query_seq, '.', '-')) g.alignment.add('t', replace(target_seq, '.', '-')) return g class covelshit(gene): """Gene class to manage predictions by covels """ def __init__(self, **kargs): self.score=None self.cm_file=None self.sequence_data=None gene.__init__(self, **kargs) def sequence(self): """ Returns the nucleotide sequence of this hit, obtained with lazy computing principle""" if self: if not self.sequence_data: self.sequence_data = upper(self.fasta_sequence()[1]) return self.sequence_data def summary(self): """ """ if not self: return 'Empty covelshit' out='cm_file: '+str(self.cm_file)+'\n' out+='>'+self.chromosome+'\n\n' out+=' Strand = '+self.strand+'\n' out+=" Score = "+str(self.score)+'\n\n' if self.strand=='+': out+=str(self.boundaries()[0]).ljust(15)+' ' else: out+=str(self.boundaries()[1]).ljust(15)+' ' out+=self.sequence() if self.strand=='+': out+=str(self.boundaries()[1]) else: out+=str(self.boundaries()[0]) return 
out class parse_covels(parser): """ Parse a covels-SE output covels objects at each next() call""" def load(self, filename=''): if not filename: filename=self.file.name check_file_presence(filename, 'filename') self.file=open(filename, 'r') self.target=None self.cm_file=None self.last_line=self.file.readline() while self.last_line and not ' : ' in self.last_line: if self.last_line.startswith('Database to search/score'): self.target= self.last_line.split()[-1] if self.last_line.startswith('Model'): self.cm_file=self.last_line.split()[-1] self.last_line=self.file.readline() def parse_next(self): while self.last_line and not ' : ' in self.last_line: self.last_line=self.file.readline() if self.last_line: g=covelshit(target=self.target, cm_file=self.cm_file) g.chromosome = self.last_line.split()[-1] g.score= float( self.last_line.split()[0] ) start = int( self.last_line.split()[1] ) end = int( self.last_line.split()[2] ) if start > end: g.strand='-' start, end= end, start else: g.strand='+' g.add_exon(start, end) self.last_line=self.file.readline() return g class erpinhit(gene): """Gene class to manage predictions by covels """ def __init__(self, **kargs): self.score=None self.evalue=None self.epn_file=None self.seq=None gene.__init__(self, **kargs) def summary(self): o= ' ERPIN hit -- ID: '+str(self.id)+'\n' o+='-Epn_file: '+str(self.epn_file)+'\n' o+='-Target: '+str(self.target)+'\n' o+='-Chromosome: '+str(self.chromosome)+' -Strand: '+str(self.strand)+' -Positions: '+str(self.boundaries()).strip('[').strip(']')+'\n' o+='-Score: '+str(self.score)+' -Evalue: '+str(self.evalue)+'\n' o+='-Seq: '+str(self.seq) return o class parse_erpin(parser): def clean_from_service_msg(self, line): if '\r' in line and 'Kb:' in line: st= line.find('Kb:') end= line.rfind('\r') line=line[:st]+line[end+1:] return line def load(self, filename=''): if not filename: filename=self.file.name check_file_presence(filename, 'filename') self.file=open(filename, 'r') self.target=None 
self.epn_file=None self.current_chromosome=None self.last_line=self.file.readline() while self.last_line and (len(self.last_line)<2 or ( not (self.last_line[:2] in ['FW', 'RC'] or self.last_line.startswith('>')) )): if self.last_line.startswith('Training set:'): self.epn_file=self.last_line.split('"')[1] elif self.last_line.startswith('Database:'): self.target=self.last_line.split('"')[1] self.last_line=self.clean_from_service_msg( self.file.readline() ) def parse_next(self): while self.last_line and (len(self.last_line)<2 or ( not (self.last_line[:2] in ['FW', 'RC'] or self.last_line.startswith('>')) )): self.last_line=self.clean_from_service_msg( self.file.readline() ) if self.last_line: if self.last_line.startswith('>'): self.current_chromosome = self.last_line.split()[0][1:] #taking only the first word for consistency with other parsers. here we would have the complete fasta title, anyway self.last_line=self.clean_from_service_msg( self.file.readline() ) g=erpinhit( chromosome= self.current_chromosome, strand={'FW':'+', 'RC':'-'}[self.last_line.split()[0]], epn_file=self.epn_file, target=self.target ) g.id = self.last_line.split()[1] g.score = float(self.last_line.split()[3]) g.evalue= float(self.last_line.split()[4].strip()) start_str, end_str= self.last_line.split()[2].split('..') g.add_exon(int(start_str), int(end_str)) self.last_line=self.clean_from_service_msg( self.file.readline() ) g.seq=self.last_line.strip() self.last_line=self.clean_from_service_msg( self.file.readline() ) return g else: self.stop() def secis_alignment(secis_list): """Input is a list of secis (gene) objects, which must have the primary sequence. The sequences must be split by white spaces in the ss components, as the output of Patscan does. 
The function aligns initially all these bits with each other, then realign the portions that may be improved """ pieces_lengths=[10, 8, 13, 5, 13, 2, 25, 14, 5, 10, 9, 10] alignment_pieces=[alignment() for i in range(12)] for secis in secis_list: #print secis.seq splt=secis.seq.split() if len(splt)!=12: raise Exception, "secis_alignment ERROR sequence provided in the secis must be split in the 12 components by white spaces" for i in range(12): if pieces_lengths[i]-len(splt[i])<0: raise Exception, "secis_alignment ERROR pieces length is too short for piece: "+str(i)+' '+str(len(splt[i]))+'>'+str(pieces_lengths[i]) alignment_pieces[i].add( secis.id, '-'*(pieces_lengths[i]-len(splt[i])) + splt[i] ) #completing seq to desired lenght using gaps final_ali=alignment() for secis in secis_list: seq='' for i in range(12): seq+=alignment_pieces[i].seq_of(secis.id) #+'Z' final_ali.add(secis.id, seq) pos, cols_list = 0, [] for piece_l in pieces_lengths: cols_list.append([pos, piece_l]) pos+=piece_l final_ali.realign_columns(input_list=cols_list) final_ali.remove_useless_gaps() final_ali.convert_sequences(upper) return final_ali global sorted_blast_hits_temp_list sorted_blast_hits_temp_list=[] class blasthit_list(list): """ Puppet class to handle the sorting of blast hits. This class can be initiated using another list as argument. The main and only method is sort. """ def __init__(self, inputlist=[]): self.features={} del self[:] for i in inputlist: self.append(i) def sort(self, sorting_guide=['chromosome', 'strand', 'position' ], inplace=True, toplevel=True): """ sorting_guide contains the field to sort the list, ordered by priority. It can also be a function to apply to the self to return the key to be evaluted with a < statement. 
By default, sorting_guide is ['chromosome', 'strand', 'position' ], so the output is like this: (genes on same chromosome clusters together, then inside those, genes on the same strand are clustered together, inside those cluster they are ordered by start position. This way of ordering facilitates merging. 1 generic_program cds 958795 959028 . + . 28215760 1 generic_program cds 3678820 3679188 . + . 28215376 10 generic_program cds 26931271 26931345 . - . 28214608 10 generic_program cds 26939920 26939970 . - . 28214736 10 generic_program cds 26996481 26996582 . - . 28214480 10 generic_program cds 27008546 27008674 . - . 28214992 11 generic_program cds 440442 440957 . + . 28216912 11 generic_program cds 110088239 110088574 . + . 28217040 13 generic_program cds 105985582 105986034 . - . 28217168 16 generic_program cds 56119934 56120302 . + . 28971408 16 generic_program cds 66120225 66120647 . + . 28971152 16 generic_program cds 2527808 2528107 . - . 28971280 """ global sorted_blast_hits_temp_list if sorting_guide: if toplevel: sorted_blast_hits_temp_list=[] key=str(sorting_guide[0]) if len(sorting_guide)>1 : self.features[key]={} #self.features.chromosome={} ->contains possible values of chromosome, linked to the list of those. each list is then sorted for bh in self: try: value=bh.__dict__[key] #chr1=bh.chromosome except: printerr('blast_list.sort ERROR can\'t obtain the field '+str(key)+' for blasthit '+str(bh), 1) raise if not self.features[key].has_key(value): self.features[key][value]=blasthit_list() #self.features.chromosome['chr1']= blastlist[] self.features[key][value].append(bh) # --> so we have a list of every blast hit in the chromsome 1, a list for chromsome 2.... 
for value in sorted(self.features[key].keys()): self.features[key][value].sort(sorting_guide[1:], toplevel=False) elif len(sorting_guide)==1 : if type(key)==str and key=='position': list.sort(self, key=lambda bh : bh[0][0]) # NB blast hits must have filled (at least one exon) elif type(key)==str: list.sort(self, key=lambda bh : bh.__dict__[key]) elif type(key)==type(lambda a:a): list.sort(self, key=key) sorted_blast_hits_temp_list.extend(self) if toplevel: self.features.clear() if inplace: self.__init__(sorted_blast_hits_temp_list) return self return blasthit_list( sorted_blast_hits_temp_list ) class species(): """ Will control connection with ncbi and tree information when I finish it """ def __init__(self, name=''): self.name='' self.taxid=0 if name: self.name=name def __str__(self): return self.name def fill_taxid(self, name='', silent=False): """ search the self.name or the name provided in ncbi taxonomy and assign a taxid. if none is found, raise a NameError exception. if more than one found, sets the one with shortest Scientific name as taxid, then raise a NameError exception with "WARNING" in the message. If silent == True, no exception is raised in any case. 
""" search_string = name if name else self.name search_results=ncbi_taxonomy.main({'silent':1, 'S':search_string, 'print_opt':1}) def search_result_to_id(line): return int( line.split()[0] ) def search_result_to_scientific_name(line): return del_white(line[:-1].split('->')[1].split('#')[0]) if not len(search_results.keys()): if not silent: raise NameError, "ncbi_taxonomy ERROR can't find species: "+search_string elif len(search_results.keys())> 1: taxid=int(min(search_results.keys(), key=lambda x : search_result_to_scientific_name(search_results[x]))) if not silent: raise NameError, "ncbi_taxonomy WARNING searching species '"+search_string+"' more than one species found: "+join([ search_result_to_scientific_name(s) for s in search_results ] , ',') self.taxid=search_result_to_id(search_results[search_results.keys()[0]]) return True def __nonzero__(self): return bool(self.taxid) def load_from_db(self, db_object): """ Utility useful just in combination with the sqlite database of selenoprofiles. It loads species object directly out of there. """ self.taxid=db_object[0] self.name=db_object[1] def name_with_underscores(self, chars_not_allowed=':/;#@%[]()<>&_='): """ Returns the name of the species formatted in order to be used as a folder or file name. Forbidden characters are replaced with {chX}, where X is the numeric ASCII code for the char.""" out=mask_characters(self.name, chars_not_allowed) return replace_chars(out, ' ', '_') def center_str(s, n_char, fill=' '): o=s while len(o) < n_char: o=fill+o+fill while len(o) > n_char: if len(o) % 2: o=o[:-1] else: o=o[1:] return o def ete_tree_correct_forbidden_chars(tree, forbidden_chars='():,', inplace=True): """ utility for ete to avoid the problem of newick conversion. In fact, in this format there are forbidden chars such as ( ) : which cannot be present in node names. This function detect the node with names including those characters and replace them to {chXX}, where XX is the ASCII integer identifier for the char. 
If inplace==True, the tree is modified in place, if it is not, a new tree with changed names is returned. """ if not inplace: tree=deepcopy(tree) for node in tree.traverse(): node.name=mask_characters(node.name, forbidden_chars) if not inplace: return tree def ete_tree_restore_forbidden_chars(tree, inplace=True): """ Reverse of the last function. Restore the names of a newly loaded tree changing the {chXX} occurences with the characters they mean """ if not inplace: tree=deepcopy(tree) for node in tree.traverse(): node.name=unmask_characters(node.name) if not inplace: return tree chars_to_replace_in_filenames='():,*./\'_=#[]' def mask_characters(a_string, chars='DEFAULT'): """ This function is used to mask some "forbidden characters", provided as any iterable (list). These characters are replaced with {chXX}, where XX is the ASCII integer identifier for the char. """ if chars=='DEFAULT': chars=chars_to_replace_in_filenames for char in chars: if char in a_string: a_string = replace_chars(a_string, char, '{ch'+str(ord(char))+'}') return a_string def unmask_characters(a_string, replace_underscores=False): """ This is the inverse of the last function. """ if replace_underscores: a_string=replace(a_string, '_', ' ') while '{ch' in a_string: try: char_n=int(a_string.split('{ch')[1].split('}')[0]) a_string=join(a_string.split('{ch'+str(char_n)+'}'), chr(char_n)) except: pass return a_string class pfamhit(gene): """ Class to manage pfamhits ... NOT FINISHED!! """ def load_data( pfam_family=None, pfam_start=None, pfam_end=None, query_name=None, query_start=None, query_end=None, ali=None ): """ Utility to load all the data providing it direclty as arguments. ali must be an alignment of two sequences, the first is the query and the last is the target. the titles are not taken in to account, they are saved into self.alignment with titles 'q' and 't' respectively. 
""" self.chromosome=pfam_family if pfam_start and pfam_end: self.add_exon(pfam_start, pfam_end) self.strand='+' self.query=gene() self.query.chromosome=query_name if query_start and query_end: self.query.add_exon(pfam_start, pfam_end) self.alignment=alignment() if ali and ali.nseq()!=2: raise Exception, "pfamhit->load ERROR alignment provided can have only two sequences... it has: "+str(ali.nseq()) self.alignment.add('q', ali.titles()[0]) self.alignment.add('t', ali.titles()[1]) def fasta_next_seq(filehandler, cline=''): """This function is thought to parse a fasta file and return a sequence at the time, to process each one without laoding the whole file. The input is a file handler, and a string for the line which the program may need to parse more than once... The use is: f=fasta_next_seq( seqs_file_h, '') while f: title, seq, cline= f ## perform operations on title, seq f=fasta_next_seq( seqs_file_h, cline) !!!! obsolete! use parse_fasta() class instead """ title, seq='', '' if not cline: cline=filehandler.readline() while cline: if cline[0]=='>': if title: return title, seq, cline title=cline[1:-1] else: seq+=cline[:-1] cline=filehandler.readline() if title: return title, seq, cline else: return None def check_file_presence(input_file, descriptor='input_file', exception_raised=Exception): if not input_file or not is_file(input_file): raise exception_raised, "ERROR "+descriptor+ ": "+str(input_file) +" not defined or not found. Run with option -h for help." def check_directory_presence(input_file, descriptor='folder', exception_raised=Exception): if not input_file or not is_directory(input_file): raise exception_raised, "ERROR "+descriptor+ ": "+str(input_file) +" not defined or not found. Run with option -h for help." 
def test_writeable_folder(folder, descriptor=''):
  """ Check that we can create (and remove) a file inside folder; raise an Exception otherwise. """
  # random suffix so that concurrent checks of the same folder do not collide
  rnumber= random.randint(1, 99999)
  folder=Folder(folder)
  filename=folder+'WrItE_TeSt.'+str(rnumber)
  # NOTE(review): bash() appears to return the exit status as its first element,
  # so a truthy [0] means the write-and-remove round trip failed -- TODO confirm
  if bash('echo "x" > '+filename+' && rm '+filename)[0]:
    raise Exception, "ERROR "+descriptor+ ": cannot write in "+folder

# thin aliases over os / os.path, kept for readability in the rest of the module
is_file=os.path.isfile
is_directory=os.path.isdir
abspath=os.path.abspath
base_filename=os.path.basename

def directory_name(*args, **kargs):
  """ Like os.path.dirname, but returns '.' instead of '' when the path has no directory component. """
  out=os.path.dirname(*args, **kargs)
  if out=='': return '.'
  return out

file_size=os.path.getsize

def list_of_lines(inputfile):
  """ Return the list of lines in file: inputfile, removing the newline characters \\n """
  check_file_presence(inputfile)
  out=[line.strip() for line in open(inputfile, 'r')]
  if out==['']: return []    # an empty file yields [] rather than ['']
  return out

def sankoff(tree, node2seqFn=None, matrix=None):
  """ Sankoff algorithm for the reconstrunction of ancestral states. tree is a ete2.Tree instance, node2seqFn is a function (leafnode)-> its seq, matrix is a hash with substitution costs (should have zeros in diagonal). NOTE: matrix.keys() is used to get all possible letter in the alphabet considered letters not present in the alphabet are considered equally (im)possibile. This have the effect of excluding those nodes for the calculation of ancestral states at that position. This makes possible its use for gapped alignments.
A node2ancestral_sequence hash is returned """ if matrix is None: matrix={'A':{'A':0, 'C':5, 'G':2, 'T':5}, 'C':{'A':5, 'C':0, 'G':5, 'T':2}, 'G':{'A':2, 'C':5, 'G':0, 'T':5}, 'T':{'A':5, 'C':2, 'G':5, 'T':0} } if node2seqFn is None: node2seqFn= lambda x:x.sequence seq_length= len(node2seqFn( tree.get_leaves()[0] )) #testing node2seqFn alphabet=matrix.keys() node2ancestral={} for sequence_index in range(seq_length): ### assigning costs, leaf to root node2cost={}; node2costPerNt={}; for node in tree.traverse(strategy='postorder'): if node.is_leaf(): lett_this_node=node2seqFn(node)[sequence_index] node2cost[node]=0 node2costPerNt[node]={} for lett in alphabet: if lett==lett_this_node: node2costPerNt[node][lett]= 0 else: node2costPerNt[node][lett]= sys.maxint-1000 else: #initializing ancestral seq on first round (index=0) if not node2ancestral.has_key(node): node2ancestral[node]='' node2costPerNt[node]={} for lett in alphabet: c=0 for child in node.get_children(): #best nt: best_lett2=None for lett2 in alphabet: if node2costPerNt[child][lett2]==node2cost[child]: best_lett2=lett2 break if best_lett2 is None: cost_change=sys.maxint-1000 #special case else: cost_change= matrix[lett][best_lett2] c+=min( [node2cost[child]+ cost_change , node2costPerNt[child][lett] ] ) # cost change, cost unchange node2costPerNt[node][lett]=c node2cost[node] = min([ node2costPerNt[node][lett] for lett in alphabet ]) ## now backtracking, assigning ancestral states (root to leaves) for node in tree.traverse(strategy='preorder'): if node==tree: #root for lett in alphabet: if node2costPerNt[node][lett]==node2cost[node]: node2ancestral[node]+=lett break elif not node.is_leaf(): ########################## !!! ########################## ---> triple check this ONE: isn't it matrix[something] ? 
if node2cost[node]+1 > node2costPerNt[node][ node2ancestral[node.up][sequence_index] ]: node2ancestral[node]+=node2ancestral[node.up] [sequence_index] else: for lett in alphabet: if node2costPerNt[node][lett]==node2cost[node]: node2ancestral[node]+=lett break return node2ancestral codon2sitecount={} #{ codon: [sites] } #sites as with split_nonsense=True def count_sites(cds, silent=False, split_nonsense=False): """Counts the number of possible Syn and nonsyn sites for a input nucletoide coding sequence. As a single site can be partly non-syn and partly syn, the numbers returned are float (always mutiple of one third) The functino computes the number also separately for CpG sites. To obtain the number of nonCpG changes, subtract the number of CpG sites from the total number Codons with any character different from ACTG (for example, N) are skipped and a message is printed to stderr Returns [nonSyn, Syn, CpG_nonSyn, CpG_syn] if nonsense==True, nonsense (stop) mutations are differentiated from nonsyn mutations. the function instead returns [ nonSyn, Syn, NonSense, CpG_nonSyn, CpG_syn, CpG_nonsense ] """ global codon2sitecount cds=replace(upper(nogap(cds)), 'U', 'T') syn=0 ; nonsyn=0 # these will result to be three times as much the actual values: I dive them as the very last step! 
cpg_syn=0 ; cpg_nonsyn=0 nonsense=0; cpg_nonsense=0 #noncpg_syn=0 ; noncpg_nonsyn=0 if len(cds)%3!=0: raise Exception, "count_sites ERROR the sequence must be composed of codons (length multiple of 3)" for i_codon in range(len(cds)/3): ## cycling codons codon=cds[i_codon*3:i_codon*3+3] if not all([lett in 'ACTG' for lett in codon] ): if not silent: printerr('count sites WARNING skipping codon n.'+str(i_codon+1)+' : '+codon, 1) continue if codon in codon2sitecount: n,s,x,cn,cs,cx=codon2sitecount[codon] else: n,s,x,cn,cs,cx=0,0,0,0,0,0 for i_within_codon in range(3): ## cycling each position nt= codon[i_within_codon] i_cds=i_codon*3+i_within_codon syn_changes_this_pos=0; nonsense_this_pos=0 for alt_nt in 'ACTG': if alt_nt==nt: continue alt_codon= codon[:i_within_codon]+alt_nt+codon[i_within_codon+1:] if transl(alt_codon)==transl(codon): syn_changes_this_pos+=1 elif "*" in transl(alt_codon) +transl(codon): nonsense_this_pos+=1 is_cpg= ( nt == 'G' and ( ( i_cds+1<len(cds) and cds[i_cds+1] =='C' ) or (i_cds!=0 and cds[i_cds-1] =='C' ) ) ) or \ ( nt == 'C' and ( ( i_cds+1<len(cds) and cds[i_cds+1] =='G' ) or (i_cds!=0 and cds[i_cds-1] =='G' ) ) ) # # G and the next or previous is C OR #C and the s+=syn_changes_this_pos n+=(3-syn_changes_this_pos-nonsense_this_pos) x+=nonsense_this_pos #syn+= s #nonsyn+= n #nonsense+=nonsense_this_pos if is_cpg: cs+= syn_changes_this_pos cn+= (3-syn_changes_this_pos-nonsense_this_pos) cx+= nonsense_this_pos # cpg_syn+= syn_changes_this_pos # cpg_nonsyn+= (3-syn_changes_this_pos-nonsense_this_pos) # cpg_nonsense+= nonsense_this_pos codon2sitecount[codon]=n,s,x,cn,cs,cx syn+=s nonsyn+=n nonsense+=x cpg_syn+=cs cpg_nonsyn+=cn cpg_nonsense+=cx #else: # noncpg_syn+= syn_changes_this_pos # noncpg_nonsyn+= (3-syn_changes_this_pos) #write('Length: '+str(len(cds)), 1) #write('Syn: '+str(syn/float(3))+' Nonsyn: '+str(nonsyn/float(3))+' ', 1 ) #write('CpGSyn: '+str(cpg_syn/float(3))+' CpGNonsyn: '+str(cpg_nonsyn/float(3))+' ', 1 ) #write('NonCpGSyn: 
'+str(noncpg_syn/float(3))+' NonCpGNonsyn: '+str(noncpg_nonsyn/float(3))+' ', 1 ) if split_nonsense: return [ (nonsyn)/float(3), syn/float(3), nonsense/float(3), (cpg_nonsyn)/float(3), cpg_syn/float(3), (cpg_nonsense)/float(3)] else: return [ (nonsyn+nonsense)/float(3), syn/float(3), (cpg_nonsyn+cpg_nonsense)/float(3), cpg_syn/float(3)] def count_changes(cds, cds2, silent=True, split_nonsense=False): """ count the number of Syn and non.syn between two sequences. The function computes the number also separately for CpG sites. To obtain the number of nonCpG changes, subtract the number of CpG sites from the total number. NOTE: for CpG call, the function looks only at the first sequence provided. So for this, results are not completely symmetrical positions with a gap in any of the two sequences are skipped returns [non_syn, syn, cpg_non_syn, cpg_syn ] if nonsense==True, nonsense (stop) mutations are differentiated from nonsyn mutations. the function instead returns [ nonSyn, Syn, NonSense, CpG_nonSyn, CpG_syn, CpG_nonsense ] """ cds=replace(upper(cds), 'U', 'T') cds2=replace(upper(cds2), 'U', 'T') syn=0 ; nonsyn=0 ; nonsense=0 cpg_syn=0 ; cpg_nonsyn=0; cpg_nonsense=0 #noncpg_syn=0 ; noncpg_nonsyn=0 if len(cds)%3!=0 or len(cds2)%3!=0: raise Exception, "count_changes ERROR the sequences must be composed of codons (length multiple of 3)" if len(cds)!=len(cds2): raise Exception, "count_changes ERROR the sequences do not have the same length" for i_codon in range(len(cds)/3): ## cycling codons codon=cds[i_codon*3:i_codon*3+3] if not all([lett in 'ACTG' for lett in codon] ): if not silent: printerr('count_changes WARNING skipping codon n.'+str(i_codon+1)+' : '+codon, 1) continue codon2=cds2[i_codon*3:i_codon*3+3] if not all([lett in 'ACTG' for lett in codon2] ): if not silent: printerr('count_changes WARNING skipping codon2 n.'+str(i_codon+1)+' : '+codon2, 1) continue if ('-' in codon and codon!='---') or ('-' in codon2 and codon2!='---') : raise Exception, "count_changes 
ERROR the sequences must be aligned by codon, i.e. the gaps must be in groups of three" if '-' in codon or '-' in codon2: #skipping gap position continue if codon!=codon2: #change! (codon) if transl(codon)!=transl(codon2): if "*" in transl(codon) +transl(codon2): is_nonsyn=2 #nonsense mutation else: is_nonsyn=1 else: is_nonsyn=False for i_within_codon in range(3): if codon[i_within_codon]!=codon2[i_within_codon]: #change! (nt) i_cds=i_codon*3+i_within_codon nt=cds[i_cds] is_cpg= ( nt == 'G' and ( ( i_cds+1<len(cds) and cds[i_cds+1] =='C' ) or (i_cds!=0 and cds[i_cds-1] =='C' ) ) ) or \ ( nt == 'C' and ( ( i_cds+1<len(cds) and cds[i_cds+1] =='G' ) or (i_cds!=0 and cds[i_cds-1] =='G' ) ) ) # # G and the next or previous is C OR #C and the if not is_nonsyn: syn+=1 elif is_nonsyn==2: nonsense+=1 else: nonsyn+=1 if is_cpg: if not is_nonsyn: cpg_syn+=1 elif is_nonsyn==2: cpg_nonsense+=1 else: cpg_nonsyn+=1 if split_nonsense: return [nonsyn, syn, nonsense, cpg_nonsyn, cpg_syn, cpg_nonsense] else: return [nonsyn+nonsense, syn, cpg_nonsyn+cpg_nonsense, cpg_syn] def count_unique_changes(cds, other_cds_list, silent=True, split_nonsense=False): """ This function is an extension of count_changes, but counts changes in several sequences at the time, and filters out those common to more than a sequence. The non-uniqness is computed just based on the sequences and no tree is inspected. returns [non_syn, syn, cpg_non_syn, cpg_syn ] if nonsense==True, nonsense (stop) mutations are differentiated from nonsyn mutations. 
the function instead returns [ nonSyn, Syn, NonSense, CpG_nonSyn, CpG_syn, CpG_nonsense ] """ cds=replace(upper(cds), 'U', 'T') position_to_change={} ### hash keeping track of changes we already saw k: position (0based) -> list of other nts observed in any of other_cds syn=0 ; nonsyn=0 ; nonsense=0 cpg_syn=0 ; cpg_nonsyn=0 ; cpg_nonsense=0 for cds2 in other_cds_list: cds2=replace(upper(cds2), 'U', 'T') if len(cds)%3!=0 or len(cds2)%3!=0: raise Exception, "count_unique_changes ERROR the sequences must be composed of codons (length multiple of 3)" if len(cds)!=len(cds2): raise Exception, "count_changes ERROR the sequences do not have the same length" for i_codon in range(len(cds)/3): ## cycling codons codon=cds[i_codon*3:i_codon*3+3] if not all([lett in 'ACTG' for lett in codon] ): if not silent: printerr('count_unique_changes WARNING skipping codon n.'+str(i_codon+1)+' : '+codon, 1) continue codon2=cds2[i_codon*3:i_codon*3+3] if not all([lett in 'ACTG' for lett in codon2] ): if not silent: printerr('count_unique_changes WARNING skipping codon2 n.'+str(i_codon+1)+' : '+codon2, 1) continue if ('-' in codon and codon!='---') or ('-' in codon2 and codon2!='---') : raise Exception, "count_unique_changes ERROR the sequences must be aligned by codon, i.e. the gaps must be in groups of three" if '-' in codon or '-' in codon2: #skipping gap position continue if codon!=codon2: #change! (codon) if transl(codon)!=transl(codon2): if "*" in transl(codon) +transl(codon2): is_nonsyn=2 #nonsense mutation else: is_nonsyn=1 else: is_nonsyn=False for i_within_codon in range(3): if codon[i_within_codon]!=codon2[i_within_codon]: #change! 
(nt) i_cds=i_codon*3+i_within_codon nt=cds[i_cds]; nt2=cds2[i_cds] #### dtermining if the change is uniq if position_to_change.has_key( i_cds ) and nt2 in position_to_change[i_cds]: continue #skipping non-uniq #is uniq if not position_to_change.has_key( i_cds ): position_to_change[i_cds]=[] position_to_change[i_cds].append(nt2) is_cpg= ( nt == 'G' and ( ( i_cds+1<len(cds) and cds[i_cds+1] =='C' ) or (i_cds!=0 and cds[i_cds-1] =='C' ) ) ) or \ ( nt == 'C' and ( ( i_cds+1<len(cds) and cds[i_cds+1] =='G' ) or (i_cds!=0 and cds[i_cds-1] =='G' ) ) ) # # G and the next or previous is C OR #C and the if not is_nonsyn: syn+=1 elif is_nonsyn==2: nonsense+=1 else: nonsyn+=1 if is_cpg: if not is_nonsyn: cpg_syn+=1 elif is_nonsyn==2: cpg_nonsense+=1 else: cpg_nonsyn+=1 if split_nonsense: return [nonsyn, syn, nonsense, cpg_nonsyn, cpg_syn, cpg_nonsense] else: return [nonsyn+nonsense, syn, cpg_nonsyn+cpg_nonsense, cpg_syn] def add_ancestral_states(t, rst_file, add=True): """ This function must be called after a codeml analysis with RateAncestor=1 has been run using a certain tree t. this function parses the rst_file and populates the non-leaf node and adds them a .sequence attribute. A .index attribute is also added to all nodes (useful particularly for non-leaf nodes). If you don't want to modify the tree t, specify add=False. 
in this case, a hash would be returned with k: node -> value: sequence """ try: PhyloTree() except: from ete2 import PhyloTree fh=open(rst_file, 'r') line=fh.readline() while line and line != "tree with node labels for Rod Page's TreeView\n": line=fh.readline() if not line: raise IOError, "add_ancestral_states ERROR can't find line \"tree with node labels for Rod Page's TreeView\" in file: "+rst_file line=fh.readline() #now in next line: tree with nodes names like: (((((1_Homo_sapiens, 3_Pan_troglodytes) 11 , 2_Gorilla_gorilla) 10 , 4_Pongo_pygmaeus) 9 , 5_Macaca_mulatta) 8 , 6_Callithrix_jacchus) 7 ; tree_text=replace(line, ') ', '):') #making it read like it was the distance t2=PhyloTree(tree_text) for node in t2.traverse(): if node.is_leaf(): node.index= int(node.name.split('_')[0]) node.name= join( node.name.split('_')[1:], '_' ) else: node.name= str(int(node.dist)) node.index= int(node.dist) while line and line != "List of extant and reconstructed sequences\n": line=fh.readline() line=fh.readline() ; line=fh.readline() ; line=fh.readline() ; line=fh.readline() #now in line of first node while line.strip(): if line.startswith('node'): #ancestral node, reconstructed index=int(line.split()[1][1:]) #print 'searching index '+str(index) (t2&(str(index))).sequence= join(line.split()[2:], '') #print 'adding to node '+str(index)+' '+(t2&(str(index))).seq line=fh.readline() fh.close() if add==True: for node_t1, node_t2 in mapping_trees(t, t2): node_t1.index=node_t2.index if not node_t1.is_leaf() : node_t1.sequence=node_t2.sequence #and hasattr(node_t2, 'sequence') else: o={} for node_t1, node_t2 in mapping_trees(t, t2): if not node_t1.is_leaf(): o[node_t1]=node_t2.sequence return o def mapping_trees(t1, t2): """ Given two identical trees, it maps the nodes of one into the other, exploiting the leaves under each node -- a uniq property of trees (right?). 
The algorithm is not very efficient, it uses almost brute force""" hash_two2one={} for node1 in t1.traverse(): id1= join(sorted(node1.get_leaf_names()), '&?') for node2 in t2.traverse(): if not hash_two2one.has_key(node2): id2= join(sorted(node2.get_leaf_names()), '&?') if id2==id1: hash_two2one[node2]=node1 out=[] for node2 in hash_two2one: out.append( [hash_two2one[node2], node2] ) return out def color_scale(percent, c1='#000000', c2='#FFFFFF' ): """ This function is useful when using colors to represent some value. Given two colors and a normalized value between 0 and 1, it returns a color between c1 and c2. Extreme case: value0 --> returns c1, value1 -> returns c2. the colors must be provided as RGB hex-string, e.g. #008800 OR 008800 ; it is returned in the form #008800 """ def dec2hex(n): """return the hexadecimal string representation of integer n""" return "%X" % n def hex2dec(s): """return the integer value of a hexadecimal string s""" return int(s, 16) c1=c1.strip('#'); r1, g1, b1= hex2dec( c1[0:2] ), hex2dec( c1[2:4] ), hex2dec( c1[4:] ), c2=c2.strip('#'); r2, g2, b2= hex2dec( c2[0:2] ), hex2dec( c2[2:4] ), hex2dec( c2[4:] ), r3= r1+ (r2-r1)*float(percent) g3= g1+ (g2-g1)*float(percent) b3= b1+ (b2-b1)*float(percent) hr3=dec2hex(r3).rjust(2, '0') hg3=dec2hex(g3).rjust(2, '0') hb3=dec2hex(b3).rjust(2, '0') return '#'+hr3+hg3+hb3 def color_scale_midpoint(percent, c1, c2, mid="#FFFFFF"): """ same concept of color_scale, but uses a midpoint color. so if the value is below 0.5, the midcolor between c1 and mid is returned, otherwise the midcolor betwee c2 and mid is returned. 
""" def dec2hex(n): """return the hexadecimal string representation of integer n""" return "%X" % n def hex2dec(s): """return the integer value of a hexadecimal string s""" return int(s, 16) midpoint = 0.5 c2 = c2.strip('#'); r1, g1, b1 = hex2dec( c2[0:2] ), hex2dec( c2[2:4] ), hex2dec( c2[4:] ), c1 = c1.strip('#'); r2, g2, b2 = hex2dec( c1[0:2] ), hex2dec( c1[2:4] ), hex2dec( c1[4:] ), midcolor = mid.strip('#'); rm, gm, bm = hex2dec( midcolor[0:2] ), hex2dec( midcolor[2:4] ), hex2dec( midcolor[4:] ), if percent >= midpoint: r3 = rm+ (r2-rm)*float(percent - midpoint) * 2 g3 = gm+ (g2-gm)*float(percent - midpoint) * 2 b3 = bm+ (b2-bm)*float(percent - midpoint) * 2 elif percent < midpoint: r3 = rm+ (r1-rm)*float(percent) * 2 g3 = gm+ (g1-gm)*float(percent) * 2 b3 = bm+ (b1-bm)*float(percent) * 2 hr3 = dec2hex(r3).rjust(2, '0') hg3 = dec2hex(g3).rjust(2, '0') hb3 = dec2hex(b3).rjust(2, '0') return '#'+hr3+hg3+hb3 def LRT(lnL_M0,lnL_M1,np_M0, np_M1 ): """ returns the p-value of a LRT test using approximate chi2. inputs are: lnL_M0,lnL_M1,np_M0, np_M1 where M0 stands for null mode, M1 for alternative model, lnL is the logarithm of the likelihood, and np the number of parameters """ try: chi2 except NameError: from scipy.stats import chi2 D= 2*(lnL_M1 - lnL_M0 ) ddf= np_M1 - np_M0 p_value = 1 - chi2.cdf(D, ddf) return p_value def tagged_tree2codeml(t, format=9, tag="tag"): """This is to get newick representation of a tree that can be input to codeml. One or more species are tagged with numeric tags. To define these, you need to add features to the tree before running this function: node= t&"whatever" node.add_feature('tag', '1') then you can use this function to get something codeml friendly: print tagged_tree2codeml( t ) --> Normally the ete format is 9, which is, without branch support or distances. 
The tag name is "tag" """ s=t.write(format=format, features=[tag]) def repl_function(m): return "#"+m.group(1) return re.sub(r"\[&&NHX:"+tag+"=(\d+)\]", repl_function, s) def align_coding_sequences(titles_seqs_list, protein_alignment=False): """Aligns a set of coding sequences according to their peptide sequences. Normally, mafft is invoked to align residues. Otherwise, the alignment of their translation can be provided (alignemtn instance) as argument. titles_seqs_list is a list of elements like [title, seq] Returns an alignment instance. NOTE: it requires temp_folder to be defined if protein_alignment is not provided --> the realign function is used """ if not protein_alignment: protein_alignment=alignment() for t, s in titles_seqs_list: protein_alignment.add(t, transl(s)) protein_alignment.realign() if sorted(protein_alignment.titles()) != sorted([t for t, s in titles_seqs_list ]): for i in sorted(protein_alignment.titles()): print i print "--------------------------------" for i in sorted([t for t, s in titles_seqs_list ]): print i raise Exception, "align_coding_sequences ERROR the titles provided and those in the protein_alignment do not correspond!" 
cds_alignment=alignment() for t, s in titles_seqs_list: gaps=0; aligned_cds='' for p in range(protein_alignment.length()): if protein_alignment.seq_of(t)[p]=='-': aligned_cds+='---' gaps+=1 else: aligned_cds+= s[(p-gaps)*3:(p-gaps)*3+3] cds_alignment.add(t, aligned_cds) return cds_alignment def function_help(f): """ returns a ipython style doc for a certain function, describing arguments, defaults, and the __doc__ string""" import inspect h=str(f.__name__)+'(' args, varargs, varkw, defaults = inspect.getargspec(f) if defaults is None: n_defaults=0 else: n_defaults=len(defaults) for i in range(len(args) - n_defaults ): #non defaulted args h+=args[i]+', ' for i in range(n_defaults ): #non defaulted args value=defaults[i] if type(value)==str: value='"'+value+'"' else: value=str(value) h+=args[i+len(args) - n_defaults]+'=' +value+', ' h=h[:-2] +')\n\n' h+=str(f.__doc__)+'\n' return h def interactive_mode(vars=None, message="welcome to the shell" ): """ To open an interactive shell inside a python script. Usage: interactive_mode()() ; double parenthesis is because this returns a pointer to a function. """ #prompt_message = "Welcome! 
Useful: G is the graph, DB, C" prompt_message = message try: from IPython.Shell import IPShellEmbed ipshell = IPShellEmbed(argv=[''],banner=prompt_message,exit_msg="Goodbye") return ipshell except ImportError: if vars is None: vars=globals() import code import rlcompleter import readline readline.parse_and_bind("tab: complete") # calling this with globals ensures we can see the environment print prompt_message shell = code.InteractiveConsole(vars) return shell.interact def load_chromosome_lengths(chromosome_length_file, max_chars=0, exception_raised=Exception): """Utility to load chromosome lenghts from a fastalength output file and also set it as a MMlib variable; also performing controls on the file """ global chromosome_lengths; chromosome_lengths={} for line in open(chromosome_length_file, 'r'): fasta_identifier = line.split()[1] length=int(line.split()[0]) if chromosome_lengths.has_key(fasta_identifier): bash('rm '+chromosome_length_file) raise exception_raised, "ERROR the target file has a duplicate fasta identifier! ("+line.split()[1]+') Please modify it and rerun. Note: remove the .index and *.fa.n* blast formatting files after changing the target file' if length==0: bash('rm '+chromosome_length_file) raise exception_raised, "ERROR the target file has a length zero entry! ("+line.split()[1]+') Please modify it and rerun. Note: remove the .index and *.fa.n* blast formatting files after changing the target file' if is_number(fasta_identifier) and fasta_identifier[0]=='0': bash('rm '+chromosome_length_file) raise exception_raised, "ERROR the target file has a numeric fasta identifier starting with zero! ("+line.split()[1]+') This would cause an unexpected blast behavior. Please modify this or these ids and rerun. 
Note: remove the .index and *.fa.n* blast formatting files after changing the target file' if ':subseq(' in fasta_identifier: bash('rm '+chromosome_length_file) raise exception_raised, "ERROR with fasta header: "+fasta_identifier+' ; this was generated by fastasubseq and will cause unexpected behavior of this program, since it is using fastasubseq itself to cut sequences. Please clean the titles in your target file from ":subseq(" tags. Note: remove the .index and *.fa.n* blast formatting files after changing the target file ' if max_chars and len(fasta_identifier)>max_chars: bash('rm '+chromosome_length_file) raise exception_raised, "ERROR with fasta header: "+fasta_identifier+' is too long. The maximum length for a fasta identifier (first word of the title) is '+str(max_chars)+' characters. Please clean the titles in your target file. Note: remove the .index and *.fa.n* blast formatting files after changing the target file' if '/' in fasta_identifier: bash('rm '+chromosome_length_file) raise exception_raised, "ERROR with fasta header: "+fasta_identifier+' has forbidden character: "/" \nPlease clean the titles in your target file. Note: remove the .index and *.fa.n* blast formatting files after changing the target file' chromosome_lengths[fasta_identifier]=length set_MMlib_var('chromosome_lengths', chromosome_lengths) def get_chromosome_lengths(): global chromosome_lengths return chromosome_lengths def RNAplot(seq, ss, fileout, label='', options=''): """Produce a structure plot with RNAplot into fileout. 
Standard postscript file is converted to specified format (from extension) if not .ps (imagemagik suite required) Label, if provided, adds a text label below the structure """ global temp_folder bbash( 'cd {temp}; echo ">title\n{seq}\n{ss}" | RNAplot {opts} '.format(temp=temp_folder, seq=seq, ss=ss, opts=options) ) expected_file=temp_folder+'title_ss.ps' output_extension = fileout.split('.')[-1] if output_extension=='ps' and not label: bbash('mv {ps} {out}'.format(ps=expected_file, out=fileout)) else: labelbit='' if not label else '-label "{lab}"'.format(lab=label) bbash( 'montage -geometry +1+1 -density 150 {labelbit} {ps} {out}'.format(ps=expected_file, out=fileout, labelbit=labelbit) ) def RNAfold(seq, constraints=None, img=None, options='', title=None, rnaplot_options=''): """Runs RNAfold to predict the secondary structure of this sequence. Optionally can produce an image (multiple extensions accepted); It can accept fold constraints; possible characters: (.) ; Returns: [secondary structure, free energy] Secondary structure reported will have the length of the input sequence (without gaps, if any)""" seq=nogap(seq) if constraints: accepted_chars={'|':0, 'x':0, '<':0, '>':0, '(':0, ')':0, '.':0} if not len(constraints)==len(seq): raise Exception, 'RNAfold ERROR lenght of sequence and of constraints must be the same! Seq.length: {s} Constraints: {c}'.format(s=len(seq), c=len(constraints)) if not all([c in accepted_chars for c in constraints] ): raise Exception, 'RNAfold ERROR illegal characters in constraints. 
Provided: {c}'.format(c=constraints) add='' if not constraints else '\n'+constraints if constraints: options+=' -C ' rnafold_out=bbash( 'echo ">title\n{seq}{add}" | RNAfold --noPS {opts} '.format(temp=temp_folder, seq=seq, add=add, opts=options) ) free_energy= float(rnafold_out.split('(')[-1][:-1] ) ss= rnafold_out.split('\n')[-1].split( ' (')[0] label='{tit}E= {e}'.format(tit=title+'\n' if title else '', e=free_energy) if not img is None: RNAplot(seq, ss, fileout=img, label=label, options=rnaplot_options) return [free_energy, ss]
marco-mariotti/selenoprofiles
MMlib.py
Python
gpl-2.0
341,845
[ "BLAST" ]
fd7a1f4d9455d46706449c4a1aa9c9e05d9a95b937734530d37b09e2255b268f
# Licensed under GPL version 3 - see LICENSE.rst import numpy as np from astropy.table import Column import astropy.units as u from astropy.coordinates import SkyCoord from ..base import SimulationSequenceElement from ..math.rotations import axangle2mat from ..math.utils import norm_vector, h2e, e2h class PointingModel(SimulationSequenceElement): '''A base model for all pointing models Conventions: - All angles (``ra``, ``dec``, and ``roll``) are given in decimal degrees. - x-axis points to sky aimpoint. - ``roll = 0`` means: z axis points North (measured N -> E). For :math:`\delta \pm 90^{\circ}` the :math:`\alpha` value is irrelevant for the pointing direction - any right ascension will lead to a pointing on the pole. A value for ``ra`` is still required, because it determines the orientation of the detector plane. Obviously, for pointing straight at the pole, the simple interpretation *z axis points north* is meaningless, but the combination of ``ra``, ``dec`` and ``roll`` still uniquely determines the position of the coordinate system. ''' def add_dir(self, photons): linecoords = Column(name='dir', length=len(photons), shape=(4,)) photons.add_column(linecoords) photons['dir'].unit = u.mm # Leave everything unset, but chances are I will forget the 4th # component. Play safe here. photons['dir'][:, 3] = 0 def process_photons(self, photons): self.add_dir(photons) return photons class FixedPointing(PointingModel): r'''Transform spacecraft to fixed sky system. This matrix transforms from the spacecraft system to a right-handed Cartesian system that is defined in the following way: the (x,y) plane is defined by the celestial equator, and the x-axis points to :math:`(\alpha, \delta) = (0,0)`. Parameters ---------- coords : `astropy.coordinates.SkySoord` Position of the source on the sky. roll : `~astropy.units.quantity.Quantity` ``roll = 0`` means: z axis points North (measured N -> E). 
reference_transform : np.array of shape (4, 4) By default, photons from an on-axis source come in parallel to the x-axis of the coordinate system. Their direction points from x=+inf inwards. If the simulation uses a different coordinate system (e.g. the optical axis is along the z-axis) set ``reference_transform`` to a matrix that performs the conversion. The optical axis of the telescope is the normal to the surface of its entrance aperture. The pointing needs to know this to determine the correct direction of the photons. Also, sources that do not shine directly onto the telescope aperture but hit it at an angle, will see a smaller projected geometric area. This is taken into account by reducing the probability of off-axies photons accordingly, and thus this object needs to know the orientation (the direction f the optical axis and rotation) of the aperture. Notes ----- For :math:`\delta \pm 90^{\circ}` the :math:`\alpha` value is irrelevant for the pointing direction - any right ascension will lead to a pointing on the pole. A value for ``ra`` is still required, because it determines the orientation of the detector plane. Obviously, for pointing straight at the pole, the simple interpretation *z axis points north* is meaningless, but the combination of ``ra``, ``dec`` and ``roll`` still uniquely determines the position of the coordinate system. ''' def __init__(self, **kwargs): self.coords = kwargs.pop('coords') if not self.coords.isscalar: raise ValueError("Coordinate must be scalar, not array.") self.roll = kwargs.pop('roll', 0. * u.rad) self.reference_transform = kwargs.pop('reference_transform', np.eye(4)) super(FixedPointing, self).__init__(**kwargs) @property def offset_coos(self): '''Return `~astropy.coordinates.SkyOffsetFrame`''' return self.coords.skyoffset_frame(rotation=self.roll) def photons_dir(self, coos, time): '''Calculate direction of photons in homogeneous coordinates. 
Parameters ---------- coos : `astropy.coordiantes.SkyCoord` Origin of each photon on the sky time : np.array Time for each photons in sec Returns ------- photons_dir : np.array of shape (n, 4) Homogeneous direction vector for each photon ''' photondir = coos.transform_to(self.offset_coos) # Minus sign here because photons start at +inf and move towards origin photonsdir = norm_vector(-photondir.cartesian.xyz.T) return np.einsum('...ij,...j->...i', self.reference_transform, e2h(photonsdir, 0)) def photons_pol(self, photonsdir, polangle, time): '''Calculate a polarization vector for linearly polarized light. The current definition cannot handle photons coming exactly from either the North pole or the South Pole of the sphere, because the polangle definition "North through east" is not well-defined in these positions. Parameters ---------- photonsdir : np.array of shape (n, 4) Direction of photons polangle : np.array Polarization angle measured N through E. If polangle has no units, it is assumed to be specified in radian. 
time : np.array Time for each photons in sec ''' if hasattr(polangle, "unit") and (polangle.unit is not None): polangle = polangle.to(u.rad) north = SkyCoord(0., 90., unit='deg', frame=self.coords) northdir = e2h(north.transform_to(self.offset_coos).cartesian.xyz.T, 0) northdir = np.dot(self.reference_transform, northdir) n_inskyplane = norm_vector(northdir - photonsdir * np.dot(northdir, photonsdir.T)[:, None]) e_inskyplane = e2h(np.cross(photonsdir[:, :3], n_inskyplane[:, :3]), 0) return np.cos(polangle)[:, None] * n_inskyplane + np.sin(polangle)[:, None] * e_inskyplane def process_photons(self, photons): ''' Parameters ---------- photons : `astropy.table.Table` ''' photons = super(FixedPointing, self).process_photons(photons) photons['dir'] = self.photons_dir(SkyCoord(photons['ra'], photons['dec'], unit='deg'), photons['time'].data) photons['polarization'] = self.photons_pol(photons['dir'].data, photons['polangle'].data, photons['time'].data) photons.meta['RA_PNT'] = (self.coords.ra.degree, '[deg] Pointing RA') photons.meta['DEC_PNT'] = (self.coords.dec.degree, '[deg] Pointing Dec') photons.meta['ROLL_PNT'] = (self.roll.to(u.degree).value, '[deg] Pointing Roll') photons.meta['RA_NOM'] = (self.coords.ra.degree, '[deg] Nominal Pointing RA') photons.meta['DEC_NOM'] = (self.coords.dec.degree, '[deg] Nominal Pointing Dec') photons.meta['ROLL_NOM'] = (self.roll.to(u.degree).value, '[deg] Nominal Pointing Roll') return photons class JitterPointing(FixedPointing): '''Transform spacecraft to fixed sky system. This extends `marxs.sourcs.FixedPointing` by adding a random jitter coordinate. In this simple implementation the jitter angles applied to two consecutive photons are entirely uncorrelated, even if these two photons arrive at the same time. This class makes the assumption that jitter is small (no change in the projected geometric area of the aperture due to jitter). 
Parameters ---------- jitter : `~astropy.units.quantity.Quantity` Gaussian sigma of jitter angle ''' def __init__(self, **kwargs): self.jitter = np.abs(kwargs.pop('jitter')) super(JitterPointing, self).__init__(**kwargs) def process_photons(self, photons): photons = super(JitterPointing, self).process_photons(photons) # Get random jitter direction n = len(photons) randang = np.random.rand(n) * 2. * np.pi ax = np.vstack([np.zeros(n), np.sin(randang), np.cos(randang)]).T if self.jitter > 0: # For comparison it's often useful to run a model with jitter=0 # but that would fail np.random.normal(scale=0) jitterang = np.random.normal(scale=self.jitter.to(u.radian).value, size=n) jitterrot = axangle2mat(ax, jitterang) photons['dir'] = e2h(np.einsum('...ij,...i->...j', jitterrot, h2e(photons['dir'])), 0) photons['polarization'] = e2h(np.einsum('...ij,...i->...j', jitterrot, h2e(photons['polarization'])), 0) return photons
hamogu/marxs
marxs/source/pointing.py
Python
gpl-3.0
9,206
[ "Gaussian" ]
9d3d233f873cdc30c6e7eb468519fabb40a0b1c36a4c327f18b53ac2ef751e0f
""" homeassistant.components.isy994 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Connects to an ISY-994 controller and loads relevant components to control its devices. Also contains the base classes for ISY Sensors, Lights, and Switches. For configuration details please visit the documentation for this component at https://home-assistant.io/components/isy994.html """ import logging from urllib.parse import urlparse from homeassistant import bootstrap from homeassistant.loader import get_component from homeassistant.helpers import validate_config from homeassistant.helpers.entity import ToggleEntity from homeassistant.const import ( CONF_HOST, CONF_USERNAME, CONF_PASSWORD, EVENT_PLATFORM_DISCOVERED, EVENT_HOMEASSISTANT_STOP, ATTR_SERVICE, ATTR_DISCOVERED, ATTR_FRIENDLY_NAME) DOMAIN = "isy994" DEPENDENCIES = [] REQUIREMENTS = ['PyISY>=1.0.5'] DISCOVER_LIGHTS = "isy994.lights" DISCOVER_SWITCHES = "isy994.switches" DISCOVER_SENSORS = "isy994.sensors" ISY = None SENSOR_STRING = 'Sensor' HIDDEN_STRING = '{HIDE ME}' CONF_TLS_VER = 'tls' _LOGGER = logging.getLogger(__name__) def setup(hass, config): """ Setup ISY994 component. This will automatically import associated lights, switches, and sensors. 
""" try: import PyISY except ImportError: _LOGGER.error("Error while importing dependency PyISY.") return False # pylint: disable=global-statement # check for required values in configuration file if not validate_config(config, {DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]}, _LOGGER): return False # pull and parse standard configuration user = config[DOMAIN][CONF_USERNAME] password = config[DOMAIN][CONF_PASSWORD] host = urlparse(config[DOMAIN][CONF_HOST]) addr = host.geturl() if host.scheme == 'http': addr = addr.replace('http://', '') https = False elif host.scheme == 'https': addr = addr.replace('https://', '') https = True else: _LOGGER.error('isy994 host value in configuration file is invalid.') return False port = host.port addr = addr.replace(':{}'.format(port), '') # pull and parse optional configuration global SENSOR_STRING global HIDDEN_STRING SENSOR_STRING = str(config[DOMAIN].get('sensor_string', SENSOR_STRING)) HIDDEN_STRING = str(config[DOMAIN].get('hidden_string', HIDDEN_STRING)) tls_version = config[DOMAIN].get(CONF_TLS_VER, None) # connect to ISY controller global ISY ISY = PyISY.ISY(addr, port, user, password, use_https=https, tls_ver=tls_version, log=_LOGGER) if not ISY.connected: return False # listen for HA stop to disconnect hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop) # Load components for the devices in the ISY controller that we support for comp_name, discovery in ((('sensor', DISCOVER_SENSORS), ('light', DISCOVER_LIGHTS), ('switch', DISCOVER_SWITCHES))): component = get_component(comp_name) bootstrap.setup_component(hass, component.DOMAIN, config) hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: discovery, ATTR_DISCOVERED: {}}) ISY.auto_update = True return True def stop(event): """ Cleanup the ISY subscription. """ ISY.auto_update = False class ISYDeviceABC(ToggleEntity): """ Abstract Class for an ISY device. 
""" _attrs = {} _onattrs = [] _states = [] _dtype = None _domain = None _name = None def __init__(self, node): # setup properties self.node = node self.hidden = HIDDEN_STRING in self.raw_name # track changes self._change_handler = self.node.status. \ subscribe('changed', self.on_update) def __del__(self): """ cleanup subscriptions because it is the right thing to do. """ self._change_handler.unsubscribe() @property def domain(self): """ Returns the domain of the entity. """ return self._domain @property def dtype(self): """ Returns the data type of the entity (binary or analog). """ if self._dtype in ['analog', 'binary']: return self._dtype return 'binary' if self.unit_of_measurement is None else 'analog' @property def should_poll(self): """ Tells Home Assistant not to poll this entity. """ return False @property def value(self): """ Returns the unclean value from the controller. """ # pylint: disable=protected-access return self.node.status._val @property def state_attributes(self): """ Returns the state attributes for the node. """ attr = {ATTR_FRIENDLY_NAME: self.name} for name, prop in self._attrs.items(): attr[name] = getattr(self, prop) return attr @property def unique_id(self): """ Returns the id of this ISY sensor. """ # pylint: disable=protected-access return self.node._id @property def raw_name(self): """ Returns the unclean node name. """ return str(self._name) \ if self._name is not None else str(self.node.name) @property def name(self): """ Returns the cleaned name of the node. """ return self.raw_name.replace(HIDDEN_STRING, '').strip() \ .replace('_', ' ') def update(self): """ Update state of the sensor. """ # ISY objects are automatically updated by the ISY's event stream pass def on_update(self, event): """ Handles the update received event. """ self.update_ha_state() @property def is_on(self): """ Returns boolean response if the node is on. """ return bool(self.value) @property def is_open(self): """ Returns boolean respons if the node is open. 
On = Open. """ return self.is_on @property def state(self): """ Returns the state of the node. """ if len(self._states) > 0: return self._states[0] if self.is_on else self._states[1] return self.value def turn_on(self, **kwargs): """ Turns the device on. """ if self.domain is not 'sensor': attrs = [kwargs.get(name) for name in self._onattrs] self.node.on(*attrs) else: _LOGGER.error('ISY cannot turn on sensors.') def turn_off(self, **kwargs): """ Turns the device off. """ if self.domain is not 'sensor': self.node.off() else: _LOGGER.error('ISY cannot turn off sensors.') @property def unit_of_measurement(self): """ Returns the defined units of measurement or None. """ try: return self.node.units except AttributeError: return None
CCOSTAN/home-assistant
homeassistant/components/isy994.py
Python
mit
7,012
[ "VisIt" ]
998dcfb9656667b70124a2929cdc798a75fe6d11326d25eb91e9c051dd48a3b5
# Author: Felix Wiemann # Contact: Felix_Wiemann@ososo.de # Revision: $Revision: 21817 $ # Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $ # Copyright: This file has been placed in the public domain. # This is a mapping of Unicode characters to LaTeX equivalents. # The information has been extracted from # <http://www.w3.org/2003/entities/xml/unicode.xml>, written by # David Carlisle and Sebastian Rahtz. # # The extraction has been done by the "create_unimap.py" script # located at <http://docutils.sf.net/tools/dev/create_unimap.py>. unicode_map = {u'\xa0': '$~$', u'\xa1': '{\\textexclamdown}', u'\xa2': '{\\textcent}', u'\xa3': '{\\textsterling}', u'\xa4': '{\\textcurrency}', u'\xa5': '{\\textyen}', u'\xa6': '{\\textbrokenbar}', u'\xa7': '{\\textsection}', u'\xa8': '{\\textasciidieresis}', u'\xa9': '{\\textcopyright}', u'\xaa': '{\\textordfeminine}', u'\xab': '{\\guillemotleft}', u'\xac': '$\\lnot$', u'\xad': '$\\-$', u'\xae': '{\\textregistered}', u'\xaf': '{\\textasciimacron}', u'\xb0': '{\\textdegree}', u'\xb1': '$\\pm$', u'\xb2': '${^2}$', u'\xb3': '${^3}$', u'\xb4': '{\\textasciiacute}', u'\xb5': '$\\mathrm{\\mu}$', u'\xb6': '{\\textparagraph}', u'\xb7': '$\\cdot$', u'\xb8': '{\\c{}}', u'\xb9': '${^1}$', u'\xba': '{\\textordmasculine}', u'\xbb': '{\\guillemotright}', u'\xbc': '{\\textonequarter}', u'\xbd': '{\\textonehalf}', u'\xbe': '{\\textthreequarters}', u'\xbf': '{\\textquestiondown}', u'\xc0': '{\\`{A}}', u'\xc1': "{\\'{A}}", u'\xc2': '{\\^{A}}', u'\xc3': '{\\~{A}}', u'\xc4': '{\\"{A}}', u'\xc5': '{\\AA}', u'\xc6': '{\\AE}', u'\xc7': '{\\c{C}}', u'\xc8': '{\\`{E}}', u'\xc9': "{\\'{E}}", u'\xca': '{\\^{E}}', u'\xcb': '{\\"{E}}', u'\xcc': '{\\`{I}}', u'\xcd': "{\\'{I}}", u'\xce': '{\\^{I}}', u'\xcf': '{\\"{I}}', u'\xd0': '{\\DH}', u'\xd1': '{\\~{N}}', u'\xd2': '{\\`{O}}', u'\xd3': "{\\'{O}}", u'\xd4': '{\\^{O}}', u'\xd5': '{\\~{O}}', u'\xd6': '{\\"{O}}', u'\xd7': '{\\texttimes}', u'\xd8': '{\\O}', u'\xd9': '{\\`{U}}', u'\xda': "{\\'{U}}", 
u'\xdb': '{\\^{U}}', u'\xdc': '{\\"{U}}', u'\xdd': "{\\'{Y}}", u'\xde': '{\\TH}', u'\xdf': '{\\ss}', u'\xe0': '{\\`{a}}', u'\xe1': "{\\'{a}}", u'\xe2': '{\\^{a}}', u'\xe3': '{\\~{a}}', u'\xe4': '{\\"{a}}', u'\xe5': '{\\aa}', u'\xe6': '{\\ae}', u'\xe7': '{\\c{c}}', u'\xe8': '{\\`{e}}', u'\xe9': "{\\'{e}}", u'\xea': '{\\^{e}}', u'\xeb': '{\\"{e}}', u'\xec': '{\\`{\\i}}', u'\xed': "{\\'{\\i}}", u'\xee': '{\\^{\\i}}', u'\xef': '{\\"{\\i}}', u'\xf0': '{\\dh}', u'\xf1': '{\\~{n}}', u'\xf2': '{\\`{o}}', u'\xf3': "{\\'{o}}", u'\xf4': '{\\^{o}}', u'\xf5': '{\\~{o}}', u'\xf6': '{\\"{o}}', u'\xf7': '$\\div$', u'\xf8': '{\\o}', u'\xf9': '{\\`{u}}', u'\xfa': "{\\'{u}}", u'\xfb': '{\\^{u}}', u'\xfc': '{\\"{u}}', u'\xfd': "{\\'{y}}", u'\xfe': '{\\th}', u'\xff': '{\\"{y}}', u'\u0100': '{\\={A}}', u'\u0101': '{\\={a}}', u'\u0102': '{\\u{A}}', u'\u0103': '{\\u{a}}', u'\u0104': '{\\k{A}}', u'\u0105': '{\\k{a}}', u'\u0106': "{\\'{C}}", u'\u0107': "{\\'{c}}", u'\u0108': '{\\^{C}}', u'\u0109': '{\\^{c}}', u'\u010a': '{\\.{C}}', u'\u010b': '{\\.{c}}', u'\u010c': '{\\v{C}}', u'\u010d': '{\\v{c}}', u'\u010e': '{\\v{D}}', u'\u010f': '{\\v{d}}', u'\u0110': '{\\DJ}', u'\u0111': '{\\dj}', u'\u0112': '{\\={E}}', u'\u0113': '{\\={e}}', u'\u0114': '{\\u{E}}', u'\u0115': '{\\u{e}}', u'\u0116': '{\\.{E}}', u'\u0117': '{\\.{e}}', u'\u0118': '{\\k{E}}', u'\u0119': '{\\k{e}}', u'\u011a': '{\\v{E}}', u'\u011b': '{\\v{e}}', u'\u011c': '{\\^{G}}', u'\u011d': '{\\^{g}}', u'\u011e': '{\\u{G}}', u'\u011f': '{\\u{g}}', u'\u0120': '{\\.{G}}', u'\u0121': '{\\.{g}}', u'\u0122': '{\\c{G}}', u'\u0123': '{\\c{g}}', u'\u0124': '{\\^{H}}', u'\u0125': '{\\^{h}}', u'\u0126': '{{\\fontencoding{LELA}\\selectfont\\char40}}', u'\u0127': '$\\Elzxh$', u'\u0128': '{\\~{I}}', u'\u0129': '{\\~{\\i}}', u'\u012a': '{\\={I}}', u'\u012b': '{\\={\\i}}', u'\u012c': '{\\u{I}}', u'\u012d': '{\\u{\\i}}', u'\u012e': '{\\k{I}}', u'\u012f': '{\\k{i}}', u'\u0130': '{\\.{I}}', u'\u0131': '{\\i}', u'\u0132': '{IJ}', u'\u0133': '{ij}', 
u'\u0134': '{\\^{J}}', u'\u0135': '{\\^{\\j}}', u'\u0136': '{\\c{K}}', u'\u0137': '{\\c{k}}', u'\u0138': '{{\\fontencoding{LELA}\\selectfont\\char91}}', u'\u0139': "{\\'{L}}", u'\u013a': "{\\'{l}}", u'\u013b': '{\\c{L}}', u'\u013c': '{\\c{l}}', u'\u013d': '{\\v{L}}', u'\u013e': '{\\v{l}}', u'\u013f': '{{\\fontencoding{LELA}\\selectfont\\char201}}', u'\u0140': '{{\\fontencoding{LELA}\\selectfont\\char202}}', u'\u0141': '{\\L}', u'\u0142': '{\\l}', u'\u0143': "{\\'{N}}", u'\u0144': "{\\'{n}}", u'\u0145': '{\\c{N}}', u'\u0146': '{\\c{n}}', u'\u0147': '{\\v{N}}', u'\u0148': '{\\v{n}}', u'\u0149': "{'n}", u'\u014a': '{\\NG}', u'\u014b': '{\\ng}', u'\u014c': '{\\={O}}', u'\u014d': '{\\={o}}', u'\u014e': '{\\u{O}}', u'\u014f': '{\\u{o}}', u'\u0150': '{\\H{O}}', u'\u0151': '{\\H{o}}', u'\u0152': '{\\OE}', u'\u0153': '{\\oe}', u'\u0154': "{\\'{R}}", u'\u0155': "{\\'{r}}", u'\u0156': '{\\c{R}}', u'\u0157': '{\\c{r}}', u'\u0158': '{\\v{R}}', u'\u0159': '{\\v{r}}', u'\u015a': "{\\'{S}}", u'\u015b': "{\\'{s}}", u'\u015c': '{\\^{S}}', u'\u015d': '{\\^{s}}', u'\u015e': '{\\c{S}}', u'\u015f': '{\\c{s}}', u'\u0160': '{\\v{S}}', u'\u0161': '{\\v{s}}', u'\u0162': '{\\c{T}}', u'\u0163': '{\\c{t}}', u'\u0164': '{\\v{T}}', u'\u0165': '{\\v{t}}', u'\u0166': '{{\\fontencoding{LELA}\\selectfont\\char47}}', u'\u0167': '{{\\fontencoding{LELA}\\selectfont\\char63}}', u'\u0168': '{\\~{U}}', u'\u0169': '{\\~{u}}', u'\u016a': '{\\={U}}', u'\u016b': '{\\={u}}', u'\u016c': '{\\u{U}}', u'\u016d': '{\\u{u}}', u'\u016e': '{\\r{U}}', u'\u016f': '{\\r{u}}', u'\u0170': '{\\H{U}}', u'\u0171': '{\\H{u}}', u'\u0172': '{\\k{U}}', u'\u0173': '{\\k{u}}', u'\u0174': '{\\^{W}}', u'\u0175': '{\\^{w}}', u'\u0176': '{\\^{Y}}', u'\u0177': '{\\^{y}}', u'\u0178': '{\\"{Y}}', u'\u0179': "{\\'{Z}}", u'\u017a': "{\\'{z}}", u'\u017b': '{\\.{Z}}', u'\u017c': '{\\.{z}}', u'\u017d': '{\\v{Z}}', u'\u017e': '{\\v{z}}', u'\u0192': '$f$', u'\u0195': '{\\texthvlig}', u'\u019e': '{\\textnrleg}', u'\u01aa': '$\\eth$', u'\u01ba': 
'{{\\fontencoding{LELA}\\selectfont\\char195}}', u'\u01c2': '{\\textdoublepipe}', u'\u01f5': "{\\'{g}}", u'\u0250': '$\\Elztrna$', u'\u0252': '$\\Elztrnsa$', u'\u0254': '$\\Elzopeno$', u'\u0256': '$\\Elzrtld$', u'\u0258': '{{\\fontencoding{LEIP}\\selectfont\\char61}}', u'\u0259': '$\\Elzschwa$', u'\u025b': '$\\varepsilon$', u'\u0261': '{g}', u'\u0263': '$\\Elzpgamma$', u'\u0264': '$\\Elzpbgam$', u'\u0265': '$\\Elztrnh$', u'\u026c': '$\\Elzbtdl$', u'\u026d': '$\\Elzrtll$', u'\u026f': '$\\Elztrnm$', u'\u0270': '$\\Elztrnmlr$', u'\u0271': '$\\Elzltlmr$', u'\u0272': '{\\Elzltln}', u'\u0273': '$\\Elzrtln$', u'\u0277': '$\\Elzclomeg$', u'\u0278': '{\\textphi}', u'\u0279': '$\\Elztrnr$', u'\u027a': '$\\Elztrnrl$', u'\u027b': '$\\Elzrttrnr$', u'\u027c': '$\\Elzrl$', u'\u027d': '$\\Elzrtlr$', u'\u027e': '$\\Elzfhr$', u'\u027f': '{{\\fontencoding{LEIP}\\selectfont\\char202}}', u'\u0282': '$\\Elzrtls$', u'\u0283': '$\\Elzesh$', u'\u0287': '$\\Elztrnt$', u'\u0288': '$\\Elzrtlt$', u'\u028a': '$\\Elzpupsil$', u'\u028b': '$\\Elzpscrv$', u'\u028c': '$\\Elzinvv$', u'\u028d': '$\\Elzinvw$', u'\u028e': '$\\Elztrny$', u'\u0290': '$\\Elzrtlz$', u'\u0292': '$\\Elzyogh$', u'\u0294': '$\\Elzglst$', u'\u0295': '$\\Elzreglst$', u'\u0296': '$\\Elzinglst$', u'\u029e': '{\\textturnk}', u'\u02a4': '$\\Elzdyogh$', u'\u02a7': '$\\Elztesh$', u'\u02bc': "{'}", u'\u02c7': '{\\textasciicaron}', u'\u02c8': '$\\Elzverts$', u'\u02cc': '$\\Elzverti$', u'\u02d0': '$\\Elzlmrk$', u'\u02d1': '$\\Elzhlmrk$', u'\u02d2': '$\\Elzsbrhr$', u'\u02d3': '$\\Elzsblhr$', u'\u02d4': '$\\Elzrais$', u'\u02d5': '$\\Elzlow$', u'\u02d8': '{\\textasciibreve}', u'\u02d9': '{\\textperiodcentered}', u'\u02da': '{\\r{}}', u'\u02db': '{\\k{}}', u'\u02dc': '{\\texttildelow}', u'\u02dd': '{\\H{}}', u'\u02e5': '{\\tone{55}}', u'\u02e6': '{\\tone{44}}', u'\u02e7': '{\\tone{33}}', u'\u02e8': '{\\tone{22}}', u'\u02e9': '{\\tone{11}}', u'\u0300': '{\\`}', u'\u0301': "{\\'}", u'\u0302': '{\\^}', u'\u0303': '{\\~}', u'\u0304': '{\\=}', 
u'\u0306': '{\\u}', u'\u0307': '{\\.}', u'\u0308': '{\\"}', u'\u030a': '{\\r}', u'\u030b': '{\\H}', u'\u030c': '{\\v}', u'\u030f': '{\\cyrchar\\C}', u'\u0311': '{{\\fontencoding{LECO}\\selectfont\\char177}}', u'\u0318': '{{\\fontencoding{LECO}\\selectfont\\char184}}', u'\u0319': '{{\\fontencoding{LECO}\\selectfont\\char185}}', u'\u0321': '$\\Elzpalh$', u'\u0322': '{\\Elzrh}', u'\u0327': '{\\c}', u'\u0328': '{\\k}', u'\u032a': '$\\Elzsbbrg$', u'\u032b': '{{\\fontencoding{LECO}\\selectfont\\char203}}', u'\u032f': '{{\\fontencoding{LECO}\\selectfont\\char207}}', u'\u0335': '{\\Elzxl}', u'\u0336': '{\\Elzbar}', u'\u0337': '{{\\fontencoding{LECO}\\selectfont\\char215}}', u'\u0338': '{{\\fontencoding{LECO}\\selectfont\\char216}}', u'\u033a': '{{\\fontencoding{LECO}\\selectfont\\char218}}', u'\u033b': '{{\\fontencoding{LECO}\\selectfont\\char219}}', u'\u033c': '{{\\fontencoding{LECO}\\selectfont\\char220}}', u'\u033d': '{{\\fontencoding{LECO}\\selectfont\\char221}}', u'\u0361': '{{\\fontencoding{LECO}\\selectfont\\char225}}', u'\u0386': "{\\'{A}}", u'\u0388': "{\\'{E}}", u'\u0389': "{\\'{H}}", u'\u038a': "{\\'{}{I}}", u'\u038c': "{\\'{}O}", u'\u038e': "$\\mathrm{'Y}$", u'\u038f': "$\\mathrm{'\\Omega}$", u'\u0390': '$\\acute{\\ddot{\\iota}}$', u'\u0391': '$\\Alpha$', u'\u0392': '$\\Beta$', u'\u0393': '$\\Gamma$', u'\u0394': '$\\Delta$', u'\u0395': '$\\Epsilon$', u'\u0396': '$\\Zeta$', u'\u0397': '$\\Eta$', u'\u0398': '$\\Theta$', u'\u0399': '$\\Iota$', u'\u039a': '$\\Kappa$', u'\u039b': '$\\Lambda$', u'\u039c': '$M$', u'\u039d': '$N$', u'\u039e': '$\\Xi$', u'\u039f': '$O$', u'\u03a0': '$\\Pi$', u'\u03a1': '$\\Rho$', u'\u03a3': '$\\Sigma$', u'\u03a4': '$\\Tau$', u'\u03a5': '$\\Upsilon$', u'\u03a6': '$\\Phi$', u'\u03a7': '$\\Chi$', u'\u03a8': '$\\Psi$', u'\u03a9': '$\\Omega$', u'\u03aa': '$\\mathrm{\\ddot{I}}$', u'\u03ab': '$\\mathrm{\\ddot{Y}}$', u'\u03ac': "{\\'{$\\alpha$}}", u'\u03ad': '$\\acute{\\epsilon}$', u'\u03ae': '$\\acute{\\eta}$', u'\u03af': '$\\acute{\\iota}$', 
u'\u03b0': '$\\acute{\\ddot{\\upsilon}}$', u'\u03b1': '$\\alpha$', u'\u03b2': '$\\beta$', u'\u03b3': '$\\gamma$', u'\u03b4': '$\\delta$', u'\u03b5': '$\\epsilon$', u'\u03b6': '$\\zeta$', u'\u03b7': '$\\eta$', u'\u03b8': '{\\texttheta}', u'\u03b9': '$\\iota$', u'\u03ba': '$\\kappa$', u'\u03bb': '$\\lambda$', u'\u03bc': '$\\mu$', u'\u03bd': '$\\nu$', u'\u03be': '$\\xi$', u'\u03bf': '$o$', u'\u03c0': '$\\pi$', u'\u03c1': '$\\rho$', u'\u03c2': '$\\varsigma$', u'\u03c3': '$\\sigma$', u'\u03c4': '$\\tau$', u'\u03c5': '$\\upsilon$', u'\u03c6': '$\\varphi$', u'\u03c7': '$\\chi$', u'\u03c8': '$\\psi$', u'\u03c9': '$\\omega$', u'\u03ca': '$\\ddot{\\iota}$', u'\u03cb': '$\\ddot{\\upsilon}$', u'\u03cc': "{\\'{o}}", u'\u03cd': '$\\acute{\\upsilon}$', u'\u03ce': '$\\acute{\\omega}$', u'\u03d0': '{\\Pisymbol{ppi022}{87}}', u'\u03d1': '{\\textvartheta}', u'\u03d2': '$\\Upsilon$', u'\u03d5': '$\\phi$', u'\u03d6': '$\\varpi$', u'\u03da': '$\\Stigma$', u'\u03dc': '$\\Digamma$', u'\u03dd': '$\\digamma$', u'\u03de': '$\\Koppa$', u'\u03e0': '$\\Sampi$', u'\u03f0': '$\\varkappa$', u'\u03f1': '$\\varrho$', u'\u03f4': '{\\textTheta}', u'\u03f6': '$\\backepsilon$', u'\u0401': '{\\cyrchar\\CYRYO}', u'\u0402': '{\\cyrchar\\CYRDJE}', u'\u0403': "{\\cyrchar{\\'\\CYRG}}", u'\u0404': '{\\cyrchar\\CYRIE}', u'\u0405': '{\\cyrchar\\CYRDZE}', u'\u0406': '{\\cyrchar\\CYRII}', u'\u0407': '{\\cyrchar\\CYRYI}', u'\u0408': '{\\cyrchar\\CYRJE}', u'\u0409': '{\\cyrchar\\CYRLJE}', u'\u040a': '{\\cyrchar\\CYRNJE}', u'\u040b': '{\\cyrchar\\CYRTSHE}', u'\u040c': "{\\cyrchar{\\'\\CYRK}}", u'\u040e': '{\\cyrchar\\CYRUSHRT}', u'\u040f': '{\\cyrchar\\CYRDZHE}', u'\u0410': '{\\cyrchar\\CYRA}', u'\u0411': '{\\cyrchar\\CYRB}', u'\u0412': '{\\cyrchar\\CYRV}', u'\u0413': '{\\cyrchar\\CYRG}', u'\u0414': '{\\cyrchar\\CYRD}', u'\u0415': '{\\cyrchar\\CYRE}', u'\u0416': '{\\cyrchar\\CYRZH}', u'\u0417': '{\\cyrchar\\CYRZ}', u'\u0418': '{\\cyrchar\\CYRI}', u'\u0419': '{\\cyrchar\\CYRISHRT}', u'\u041a': '{\\cyrchar\\CYRK}', 
u'\u041b': '{\\cyrchar\\CYRL}', u'\u041c': '{\\cyrchar\\CYRM}', u'\u041d': '{\\cyrchar\\CYRN}', u'\u041e': '{\\cyrchar\\CYRO}', u'\u041f': '{\\cyrchar\\CYRP}', u'\u0420': '{\\cyrchar\\CYRR}', u'\u0421': '{\\cyrchar\\CYRS}', u'\u0422': '{\\cyrchar\\CYRT}', u'\u0423': '{\\cyrchar\\CYRU}', u'\u0424': '{\\cyrchar\\CYRF}', u'\u0425': '{\\cyrchar\\CYRH}', u'\u0426': '{\\cyrchar\\CYRC}', u'\u0427': '{\\cyrchar\\CYRCH}', u'\u0428': '{\\cyrchar\\CYRSH}', u'\u0429': '{\\cyrchar\\CYRSHCH}', u'\u042a': '{\\cyrchar\\CYRHRDSN}', u'\u042b': '{\\cyrchar\\CYRERY}', u'\u042c': '{\\cyrchar\\CYRSFTSN}', u'\u042d': '{\\cyrchar\\CYREREV}', u'\u042e': '{\\cyrchar\\CYRYU}', u'\u042f': '{\\cyrchar\\CYRYA}', u'\u0430': '{\\cyrchar\\cyra}', u'\u0431': '{\\cyrchar\\cyrb}', u'\u0432': '{\\cyrchar\\cyrv}', u'\u0433': '{\\cyrchar\\cyrg}', u'\u0434': '{\\cyrchar\\cyrd}', u'\u0435': '{\\cyrchar\\cyre}', u'\u0436': '{\\cyrchar\\cyrzh}', u'\u0437': '{\\cyrchar\\cyrz}', u'\u0438': '{\\cyrchar\\cyri}', u'\u0439': '{\\cyrchar\\cyrishrt}', u'\u043a': '{\\cyrchar\\cyrk}', u'\u043b': '{\\cyrchar\\cyrl}', u'\u043c': '{\\cyrchar\\cyrm}', u'\u043d': '{\\cyrchar\\cyrn}', u'\u043e': '{\\cyrchar\\cyro}', u'\u043f': '{\\cyrchar\\cyrp}', u'\u0440': '{\\cyrchar\\cyrr}', u'\u0441': '{\\cyrchar\\cyrs}', u'\u0442': '{\\cyrchar\\cyrt}', u'\u0443': '{\\cyrchar\\cyru}', u'\u0444': '{\\cyrchar\\cyrf}', u'\u0445': '{\\cyrchar\\cyrh}', u'\u0446': '{\\cyrchar\\cyrc}', u'\u0447': '{\\cyrchar\\cyrch}', u'\u0448': '{\\cyrchar\\cyrsh}', u'\u0449': '{\\cyrchar\\cyrshch}', u'\u044a': '{\\cyrchar\\cyrhrdsn}', u'\u044b': '{\\cyrchar\\cyrery}', u'\u044c': '{\\cyrchar\\cyrsftsn}', u'\u044d': '{\\cyrchar\\cyrerev}', u'\u044e': '{\\cyrchar\\cyryu}', u'\u044f': '{\\cyrchar\\cyrya}', u'\u0451': '{\\cyrchar\\cyryo}', u'\u0452': '{\\cyrchar\\cyrdje}', u'\u0453': "{\\cyrchar{\\'\\cyrg}}", u'\u0454': '{\\cyrchar\\cyrie}', u'\u0455': '{\\cyrchar\\cyrdze}', u'\u0456': '{\\cyrchar\\cyrii}', u'\u0457': '{\\cyrchar\\cyryi}', u'\u0458': 
'{\\cyrchar\\cyrje}', u'\u0459': '{\\cyrchar\\cyrlje}', u'\u045a': '{\\cyrchar\\cyrnje}', u'\u045b': '{\\cyrchar\\cyrtshe}', u'\u045c': "{\\cyrchar{\\'\\cyrk}}", u'\u045e': '{\\cyrchar\\cyrushrt}', u'\u045f': '{\\cyrchar\\cyrdzhe}', u'\u0460': '{\\cyrchar\\CYROMEGA}', u'\u0461': '{\\cyrchar\\cyromega}', u'\u0462': '{\\cyrchar\\CYRYAT}', u'\u0464': '{\\cyrchar\\CYRIOTE}', u'\u0465': '{\\cyrchar\\cyriote}', u'\u0466': '{\\cyrchar\\CYRLYUS}', u'\u0467': '{\\cyrchar\\cyrlyus}', u'\u0468': '{\\cyrchar\\CYRIOTLYUS}', u'\u0469': '{\\cyrchar\\cyriotlyus}', u'\u046a': '{\\cyrchar\\CYRBYUS}', u'\u046c': '{\\cyrchar\\CYRIOTBYUS}', u'\u046d': '{\\cyrchar\\cyriotbyus}', u'\u046e': '{\\cyrchar\\CYRKSI}', u'\u046f': '{\\cyrchar\\cyrksi}', u'\u0470': '{\\cyrchar\\CYRPSI}', u'\u0471': '{\\cyrchar\\cyrpsi}', u'\u0472': '{\\cyrchar\\CYRFITA}', u'\u0474': '{\\cyrchar\\CYRIZH}', u'\u0478': '{\\cyrchar\\CYRUK}', u'\u0479': '{\\cyrchar\\cyruk}', u'\u047a': '{\\cyrchar\\CYROMEGARND}', u'\u047b': '{\\cyrchar\\cyromegarnd}', u'\u047c': '{\\cyrchar\\CYROMEGATITLO}', u'\u047d': '{\\cyrchar\\cyromegatitlo}', u'\u047e': '{\\cyrchar\\CYROT}', u'\u047f': '{\\cyrchar\\cyrot}', u'\u0480': '{\\cyrchar\\CYRKOPPA}', u'\u0481': '{\\cyrchar\\cyrkoppa}', u'\u0482': '{\\cyrchar\\cyrthousands}', u'\u0488': '{\\cyrchar\\cyrhundredthousands}', u'\u0489': '{\\cyrchar\\cyrmillions}', u'\u048c': '{\\cyrchar\\CYRSEMISFTSN}', u'\u048d': '{\\cyrchar\\cyrsemisftsn}', u'\u048e': '{\\cyrchar\\CYRRTICK}', u'\u048f': '{\\cyrchar\\cyrrtick}', u'\u0490': '{\\cyrchar\\CYRGUP}', u'\u0491': '{\\cyrchar\\cyrgup}', u'\u0492': '{\\cyrchar\\CYRGHCRS}', u'\u0493': '{\\cyrchar\\cyrghcrs}', u'\u0494': '{\\cyrchar\\CYRGHK}', u'\u0495': '{\\cyrchar\\cyrghk}', u'\u0496': '{\\cyrchar\\CYRZHDSC}', u'\u0497': '{\\cyrchar\\cyrzhdsc}', u'\u0498': '{\\cyrchar\\CYRZDSC}', u'\u0499': '{\\cyrchar\\cyrzdsc}', u'\u049a': '{\\cyrchar\\CYRKDSC}', u'\u049b': '{\\cyrchar\\cyrkdsc}', u'\u049c': '{\\cyrchar\\CYRKVCRS}', u'\u049d': 
'{\\cyrchar\\cyrkvcrs}', u'\u049e': '{\\cyrchar\\CYRKHCRS}', u'\u049f': '{\\cyrchar\\cyrkhcrs}', u'\u04a0': '{\\cyrchar\\CYRKBEAK}', u'\u04a1': '{\\cyrchar\\cyrkbeak}', u'\u04a2': '{\\cyrchar\\CYRNDSC}', u'\u04a3': '{\\cyrchar\\cyrndsc}', u'\u04a4': '{\\cyrchar\\CYRNG}', u'\u04a5': '{\\cyrchar\\cyrng}', u'\u04a6': '{\\cyrchar\\CYRPHK}', u'\u04a7': '{\\cyrchar\\cyrphk}', u'\u04a8': '{\\cyrchar\\CYRABHHA}', u'\u04a9': '{\\cyrchar\\cyrabhha}', u'\u04aa': '{\\cyrchar\\CYRSDSC}', u'\u04ab': '{\\cyrchar\\cyrsdsc}', u'\u04ac': '{\\cyrchar\\CYRTDSC}', u'\u04ad': '{\\cyrchar\\cyrtdsc}', u'\u04ae': '{\\cyrchar\\CYRY}', u'\u04af': '{\\cyrchar\\cyry}', u'\u04b0': '{\\cyrchar\\CYRYHCRS}', u'\u04b1': '{\\cyrchar\\cyryhcrs}', u'\u04b2': '{\\cyrchar\\CYRHDSC}', u'\u04b3': '{\\cyrchar\\cyrhdsc}', u'\u04b4': '{\\cyrchar\\CYRTETSE}', u'\u04b5': '{\\cyrchar\\cyrtetse}', u'\u04b6': '{\\cyrchar\\CYRCHRDSC}', u'\u04b7': '{\\cyrchar\\cyrchrdsc}', u'\u04b8': '{\\cyrchar\\CYRCHVCRS}', u'\u04b9': '{\\cyrchar\\cyrchvcrs}', u'\u04ba': '{\\cyrchar\\CYRSHHA}', u'\u04bb': '{\\cyrchar\\cyrshha}', u'\u04bc': '{\\cyrchar\\CYRABHCH}', u'\u04bd': '{\\cyrchar\\cyrabhch}', u'\u04be': '{\\cyrchar\\CYRABHCHDSC}', u'\u04bf': '{\\cyrchar\\cyrabhchdsc}', u'\u04c0': '{\\cyrchar\\CYRpalochka}', u'\u04c3': '{\\cyrchar\\CYRKHK}', u'\u04c4': '{\\cyrchar\\cyrkhk}', u'\u04c7': '{\\cyrchar\\CYRNHK}', u'\u04c8': '{\\cyrchar\\cyrnhk}', u'\u04cb': '{\\cyrchar\\CYRCHLDSC}', u'\u04cc': '{\\cyrchar\\cyrchldsc}', u'\u04d4': '{\\cyrchar\\CYRAE}', u'\u04d5': '{\\cyrchar\\cyrae}', u'\u04d8': '{\\cyrchar\\CYRSCHWA}', u'\u04d9': '{\\cyrchar\\cyrschwa}', u'\u04e0': '{\\cyrchar\\CYRABHDZE}', u'\u04e1': '{\\cyrchar\\cyrabhdze}', u'\u04e8': '{\\cyrchar\\CYROTLD}', u'\u04e9': '{\\cyrchar\\cyrotld}', u'\u2002': '{\\hspace{0.6em}}', u'\u2003': '{\\hspace{1em}}', u'\u2004': '{\\hspace{0.33em}}', u'\u2005': '{\\hspace{0.25em}}', u'\u2006': '{\\hspace{0.166em}}', u'\u2007': '{\\hphantom{0}}', u'\u2008': '{\\hphantom{,}}', u'\u2009': 
'{\\hspace{0.167em}}', u'\u200a': '$\\mkern1mu$', u'\u2010': '{-}', u'\u2013': '{\\textendash}', u'\u2014': '{\\textemdash}', u'\u2015': '{\\rule{1em}{1pt}}', u'\u2016': '$\\Vert$', u'\u2018': '{`}', u'\u2019': "{'}", u'\u201a': '{,}', u'\u201b': '$\\Elzreapos$', u'\u201c': '{\\textquotedblleft}', u'\u201d': '{\\textquotedblright}', u'\u201e': '{,,}', u'\u2020': '{\\textdagger}', u'\u2021': '{\\textdaggerdbl}', u'\u2022': '{\\textbullet}', u'\u2024': '{.}', u'\u2025': '{..}', u'\u2026': '{\\ldots}', u'\u2030': '{\\textperthousand}', u'\u2031': '{\\textpertenthousand}', u'\u2032': "${'}$", u'\u2033': "${''}$", u'\u2034': "${'''}$", u'\u2035': '$\\backprime$', u'\u2039': '{\\guilsinglleft}', u'\u203a': '{\\guilsinglright}', u'\u2057': "$''''$", u'\u205f': '{\\mkern4mu}', u'\u2060': '{\\nolinebreak}', u'\u20a7': '{\\ensuremath{\\Elzpes}}', u'\u20ac': '{\\mbox{\\texteuro}}', u'\u20db': '$\\dddot$', u'\u20dc': '$\\ddddot$', u'\u2102': '$\\mathbb{C}$', u'\u210a': '{\\mathscr{g}}', u'\u210b': '$\\mathscr{H}$', u'\u210c': '$\\mathfrak{H}$', u'\u210d': '$\\mathbb{H}$', u'\u210f': '$\\hslash$', u'\u2110': '$\\mathscr{I}$', u'\u2111': '$\\mathfrak{I}$', u'\u2112': '$\\mathscr{L}$', u'\u2113': '$\\mathscr{l}$', u'\u2115': '$\\mathbb{N}$', u'\u2116': '{\\cyrchar\\textnumero}', u'\u2118': '$\\wp$', u'\u2119': '$\\mathbb{P}$', u'\u211a': '$\\mathbb{Q}$', u'\u211b': '$\\mathscr{R}$', u'\u211c': '$\\mathfrak{R}$', u'\u211d': '$\\mathbb{R}$', u'\u211e': '$\\Elzxrat$', u'\u2122': '{\\texttrademark}', u'\u2124': '$\\mathbb{Z}$', u'\u2126': '$\\Omega$', u'\u2127': '$\\mho$', u'\u2128': '$\\mathfrak{Z}$', u'\u2129': '$\\ElsevierGlyph{2129}$', u'\u212b': '{\\AA}', u'\u212c': '$\\mathscr{B}$', u'\u212d': '$\\mathfrak{C}$', u'\u212f': '$\\mathscr{e}$', u'\u2130': '$\\mathscr{E}$', u'\u2131': '$\\mathscr{F}$', u'\u2133': '$\\mathscr{M}$', u'\u2134': '$\\mathscr{o}$', u'\u2135': '$\\aleph$', u'\u2136': '$\\beth$', u'\u2137': '$\\gimel$', u'\u2138': '$\\daleth$', u'\u2153': 
'$\\textfrac{1}{3}$', u'\u2154': '$\\textfrac{2}{3}$', u'\u2155': '$\\textfrac{1}{5}$', u'\u2156': '$\\textfrac{2}{5}$', u'\u2157': '$\\textfrac{3}{5}$', u'\u2158': '$\\textfrac{4}{5}$', u'\u2159': '$\\textfrac{1}{6}$', u'\u215a': '$\\textfrac{5}{6}$', u'\u215b': '$\\textfrac{1}{8}$', u'\u215c': '$\\textfrac{3}{8}$', u'\u215d': '$\\textfrac{5}{8}$', u'\u215e': '$\\textfrac{7}{8}$', u'\u2190': '$\\leftarrow$', u'\u2191': '$\\uparrow$', u'\u2192': '$\\rightarrow$', u'\u2193': '$\\downarrow$', u'\u2194': '$\\leftrightarrow$', u'\u2195': '$\\updownarrow$', u'\u2196': '$\\nwarrow$', u'\u2197': '$\\nearrow$', u'\u2198': '$\\searrow$', u'\u2199': '$\\swarrow$', u'\u219a': '$\\nleftarrow$', u'\u219b': '$\\nrightarrow$', u'\u219c': '$\\arrowwaveright$', u'\u219d': '$\\arrowwaveright$', u'\u219e': '$\\twoheadleftarrow$', u'\u21a0': '$\\twoheadrightarrow$', u'\u21a2': '$\\leftarrowtail$', u'\u21a3': '$\\rightarrowtail$', u'\u21a6': '$\\mapsto$', u'\u21a9': '$\\hookleftarrow$', u'\u21aa': '$\\hookrightarrow$', u'\u21ab': '$\\looparrowleft$', u'\u21ac': '$\\looparrowright$', u'\u21ad': '$\\leftrightsquigarrow$', u'\u21ae': '$\\nleftrightarrow$', u'\u21b0': '$\\Lsh$', u'\u21b1': '$\\Rsh$', u'\u21b3': '$\\ElsevierGlyph{21B3}$', u'\u21b6': '$\\curvearrowleft$', u'\u21b7': '$\\curvearrowright$', u'\u21ba': '$\\circlearrowleft$', u'\u21bb': '$\\circlearrowright$', u'\u21bc': '$\\leftharpoonup$', u'\u21bd': '$\\leftharpoondown$', u'\u21be': '$\\upharpoonright$', u'\u21bf': '$\\upharpoonleft$', u'\u21c0': '$\\rightharpoonup$', u'\u21c1': '$\\rightharpoondown$', u'\u21c2': '$\\downharpoonright$', u'\u21c3': '$\\downharpoonleft$', u'\u21c4': '$\\rightleftarrows$', u'\u21c5': '$\\dblarrowupdown$', u'\u21c6': '$\\leftrightarrows$', u'\u21c7': '$\\leftleftarrows$', u'\u21c8': '$\\upuparrows$', u'\u21c9': '$\\rightrightarrows$', u'\u21ca': '$\\downdownarrows$', u'\u21cb': '$\\leftrightharpoons$', u'\u21cc': '$\\rightleftharpoons$', u'\u21cd': '$\\nLeftarrow$', u'\u21ce': 
'$\\nLeftrightarrow$', u'\u21cf': '$\\nRightarrow$', u'\u21d0': '$\\Leftarrow$', u'\u21d1': '$\\Uparrow$', u'\u21d2': '$\\Rightarrow$', u'\u21d3': '$\\Downarrow$', u'\u21d4': '$\\Leftrightarrow$', u'\u21d5': '$\\Updownarrow$', u'\u21da': '$\\Lleftarrow$', u'\u21db': '$\\Rrightarrow$', u'\u21dd': '$\\rightsquigarrow$', u'\u21f5': '$\\DownArrowUpArrow$', u'\u2200': '$\\forall$', u'\u2201': '$\\complement$', u'\u2202': '$\\partial$', u'\u2203': '$\\exists$', u'\u2204': '$\\nexists$', u'\u2205': '$\\varnothing$', u'\u2207': '$\\nabla$', u'\u2208': '$\\in$', u'\u2209': '$\\not\\in$', u'\u220b': '$\\ni$', u'\u220c': '$\\not\\ni$', u'\u220f': '$\\prod$', u'\u2210': '$\\coprod$', u'\u2211': '$\\sum$', u'\u2212': '{-}', u'\u2213': '$\\mp$', u'\u2214': '$\\dotplus$', u'\u2216': '$\\setminus$', u'\u2217': '${_\\ast}$', u'\u2218': '$\\circ$', u'\u2219': '$\\bullet$', u'\u221a': '$\\surd$', u'\u221d': '$\\propto$', u'\u221e': '$\\infty$', u'\u221f': '$\\rightangle$', u'\u2220': '$\\angle$', u'\u2221': '$\\measuredangle$', u'\u2222': '$\\sphericalangle$', u'\u2223': '$\\mid$', u'\u2224': '$\\nmid$', u'\u2225': '$\\parallel$', u'\u2226': '$\\nparallel$', u'\u2227': '$\\wedge$', u'\u2228': '$\\vee$', u'\u2229': '$\\cap$', u'\u222a': '$\\cup$', u'\u222b': '$\\int$', u'\u222c': '$\\int\\!\\int$', u'\u222d': '$\\int\\!\\int\\!\\int$', u'\u222e': '$\\oint$', u'\u222f': '$\\surfintegral$', u'\u2230': '$\\volintegral$', u'\u2231': '$\\clwintegral$', u'\u2232': '$\\ElsevierGlyph{2232}$', u'\u2233': '$\\ElsevierGlyph{2233}$', u'\u2234': '$\\therefore$', u'\u2235': '$\\because$', u'\u2237': '$\\Colon$', u'\u2238': '$\\ElsevierGlyph{2238}$', u'\u223a': '$\\mathbin{{:}\\!\\!{-}\\!\\!{:}}$', u'\u223b': '$\\homothetic$', u'\u223c': '$\\sim$', u'\u223d': '$\\backsim$', u'\u223e': '$\\lazysinv$', u'\u2240': '$\\wr$', u'\u2241': '$\\not\\sim$', u'\u2242': '$\\ElsevierGlyph{2242}$', u'\u2243': '$\\simeq$', u'\u2244': '$\\not\\simeq$', u'\u2245': '$\\cong$', u'\u2246': '$\\approxnotequal$', 
u'\u2247': '$\\not\\cong$', u'\u2248': '$\\approx$', u'\u2249': '$\\not\\approx$', u'\u224a': '$\\approxeq$', u'\u224b': '$\\tildetrpl$', u'\u224c': '$\\allequal$', u'\u224d': '$\\asymp$', u'\u224e': '$\\Bumpeq$', u'\u224f': '$\\bumpeq$', u'\u2250': '$\\doteq$', u'\u2251': '$\\doteqdot$', u'\u2252': '$\\fallingdotseq$', u'\u2253': '$\\risingdotseq$', u'\u2254': '{:=}', u'\u2255': '$=:$', u'\u2256': '$\\eqcirc$', u'\u2257': '$\\circeq$', u'\u2259': '$\\estimates$', u'\u225a': '$\\ElsevierGlyph{225A}$', u'\u225b': '$\\starequal$', u'\u225c': '$\\triangleq$', u'\u225f': '$\\ElsevierGlyph{225F}$', u'\u2260': '$\\not =$', u'\u2261': '$\\equiv$', u'\u2262': '$\\not\\equiv$', u'\u2264': '$\\leq$', u'\u2265': '$\\geq$', u'\u2266': '$\\leqq$', u'\u2267': '$\\geqq$', u'\u2268': '$\\lneqq$', u'\u2269': '$\\gneqq$', u'\u226a': '$\\ll$', u'\u226b': '$\\gg$', u'\u226c': '$\\between$', u'\u226d': '$\\not\\kern-0.3em\\times$', u'\u226e': '$\\not<$', u'\u226f': '$\\not>$', u'\u2270': '$\\not\\leq$', u'\u2271': '$\\not\\geq$', u'\u2272': '$\\lessequivlnt$', u'\u2273': '$\\greaterequivlnt$', u'\u2274': '$\\ElsevierGlyph{2274}$', u'\u2275': '$\\ElsevierGlyph{2275}$', u'\u2276': '$\\lessgtr$', u'\u2277': '$\\gtrless$', u'\u2278': '$\\notlessgreater$', u'\u2279': '$\\notgreaterless$', u'\u227a': '$\\prec$', u'\u227b': '$\\succ$', u'\u227c': '$\\preccurlyeq$', u'\u227d': '$\\succcurlyeq$', u'\u227e': '$\\precapprox$', u'\u227f': '$\\succapprox$', u'\u2280': '$\\not\\prec$', u'\u2281': '$\\not\\succ$', u'\u2282': '$\\subset$', u'\u2283': '$\\supset$', u'\u2284': '$\\not\\subset$', u'\u2285': '$\\not\\supset$', u'\u2286': '$\\subseteq$', u'\u2287': '$\\supseteq$', u'\u2288': '$\\not\\subseteq$', u'\u2289': '$\\not\\supseteq$', u'\u228a': '$\\subsetneq$', u'\u228b': '$\\supsetneq$', u'\u228e': '$\\uplus$', u'\u228f': '$\\sqsubset$', u'\u2290': '$\\sqsupset$', u'\u2291': '$\\sqsubseteq$', u'\u2292': '$\\sqsupseteq$', u'\u2293': '$\\sqcap$', u'\u2294': '$\\sqcup$', u'\u2295': '$\\oplus$', 
u'\u2296': '$\\ominus$', u'\u2297': '$\\otimes$', u'\u2298': '$\\oslash$', u'\u2299': '$\\odot$', u'\u229a': '$\\circledcirc$', u'\u229b': '$\\circledast$', u'\u229d': '$\\circleddash$', u'\u229e': '$\\boxplus$', u'\u229f': '$\\boxminus$', u'\u22a0': '$\\boxtimes$', u'\u22a1': '$\\boxdot$', u'\u22a2': '$\\vdash$', u'\u22a3': '$\\dashv$', u'\u22a4': '$\\top$', u'\u22a5': '$\\perp$', u'\u22a7': '$\\truestate$', u'\u22a8': '$\\forcesextra$', u'\u22a9': '$\\Vdash$', u'\u22aa': '$\\Vvdash$', u'\u22ab': '$\\VDash$', u'\u22ac': '$\\nvdash$', u'\u22ad': '$\\nvDash$', u'\u22ae': '$\\nVdash$', u'\u22af': '$\\nVDash$', u'\u22b2': '$\\vartriangleleft$', u'\u22b3': '$\\vartriangleright$', u'\u22b4': '$\\trianglelefteq$', u'\u22b5': '$\\trianglerighteq$', u'\u22b6': '$\\original$', u'\u22b7': '$\\image$', u'\u22b8': '$\\multimap$', u'\u22b9': '$\\hermitconjmatrix$', u'\u22ba': '$\\intercal$', u'\u22bb': '$\\veebar$', u'\u22be': '$\\rightanglearc$', u'\u22c0': '$\\ElsevierGlyph{22C0}$', u'\u22c1': '$\\ElsevierGlyph{22C1}$', u'\u22c2': '$\\bigcap$', u'\u22c3': '$\\bigcup$', u'\u22c4': '$\\diamond$', u'\u22c5': '$\\cdot$', u'\u22c6': '$\\star$', u'\u22c7': '$\\divideontimes$', u'\u22c8': '$\\bowtie$', u'\u22c9': '$\\ltimes$', u'\u22ca': '$\\rtimes$', u'\u22cb': '$\\leftthreetimes$', u'\u22cc': '$\\rightthreetimes$', u'\u22cd': '$\\backsimeq$', u'\u22ce': '$\\curlyvee$', u'\u22cf': '$\\curlywedge$', u'\u22d0': '$\\Subset$', u'\u22d1': '$\\Supset$', u'\u22d2': '$\\Cap$', u'\u22d3': '$\\Cup$', u'\u22d4': '$\\pitchfork$', u'\u22d6': '$\\lessdot$', u'\u22d7': '$\\gtrdot$', u'\u22d8': '$\\verymuchless$', u'\u22d9': '$\\verymuchgreater$', u'\u22da': '$\\lesseqgtr$', u'\u22db': '$\\gtreqless$', u'\u22de': '$\\curlyeqprec$', u'\u22df': '$\\curlyeqsucc$', u'\u22e2': '$\\not\\sqsubseteq$', u'\u22e3': '$\\not\\sqsupseteq$', u'\u22e5': '$\\Elzsqspne$', u'\u22e6': '$\\lnsim$', u'\u22e7': '$\\gnsim$', u'\u22e8': '$\\precedesnotsimilar$', u'\u22e9': '$\\succnsim$', u'\u22ea': '$\\ntriangleleft$', 
u'\u22eb': '$\\ntriangleright$', u'\u22ec': '$\\ntrianglelefteq$', u'\u22ed': '$\\ntrianglerighteq$', u'\u22ee': '$\\vdots$', u'\u22ef': '$\\cdots$', u'\u22f0': '$\\upslopeellipsis$', u'\u22f1': '$\\downslopeellipsis$', u'\u2305': '{\\barwedge}', u'\u2306': '$\\perspcorrespond$', u'\u2308': '$\\lceil$', u'\u2309': '$\\rceil$', u'\u230a': '$\\lfloor$', u'\u230b': '$\\rfloor$', u'\u2315': '$\\recorder$', u'\u2316': '$\\mathchar"2208$', u'\u231c': '$\\ulcorner$', u'\u231d': '$\\urcorner$', u'\u231e': '$\\llcorner$', u'\u231f': '$\\lrcorner$', u'\u2322': '$\\frown$', u'\u2323': '$\\smile$', u'\u2329': '$\\langle$', u'\u232a': '$\\rangle$', u'\u233d': '$\\ElsevierGlyph{E838}$', u'\u23a3': '$\\Elzdlcorn$', u'\u23b0': '$\\lmoustache$', u'\u23b1': '$\\rmoustache$', u'\u2423': '{\\textvisiblespace}', u'\u2460': '{\\ding{172}}', u'\u2461': '{\\ding{173}}', u'\u2462': '{\\ding{174}}', u'\u2463': '{\\ding{175}}', u'\u2464': '{\\ding{176}}', u'\u2465': '{\\ding{177}}', u'\u2466': '{\\ding{178}}', u'\u2467': '{\\ding{179}}', u'\u2468': '{\\ding{180}}', u'\u2469': '{\\ding{181}}', u'\u24c8': '$\\circledS$', u'\u2506': '$\\Elzdshfnc$', u'\u2519': '$\\Elzsqfnw$', u'\u2571': '$\\diagup$', u'\u25a0': '{\\ding{110}}', u'\u25a1': '$\\square$', u'\u25aa': '$\\blacksquare$', u'\u25ad': '$\\fbox{~~}$', u'\u25af': '$\\Elzvrecto$', u'\u25b1': '$\\ElsevierGlyph{E381}$', u'\u25b2': '{\\ding{115}}', u'\u25b3': '$\\bigtriangleup$', u'\u25b4': '$\\blacktriangle$', u'\u25b5': '$\\vartriangle$', u'\u25b8': '$\\blacktriangleright$', u'\u25b9': '$\\triangleright$', u'\u25bc': '{\\ding{116}}', u'\u25bd': '$\\bigtriangledown$', u'\u25be': '$\\blacktriangledown$', u'\u25bf': '$\\triangledown$', u'\u25c2': '$\\blacktriangleleft$', u'\u25c3': '$\\triangleleft$', u'\u25c6': '{\\ding{117}}', u'\u25ca': '$\\lozenge$', u'\u25cb': '$\\bigcirc$', u'\u25cf': '{\\ding{108}}', u'\u25d0': '$\\Elzcirfl$', u'\u25d1': '$\\Elzcirfr$', u'\u25d2': '$\\Elzcirfb$', u'\u25d7': '{\\ding{119}}', u'\u25d8': '$\\Elzrvbull$', 
u'\u25e7': '$\\Elzsqfl$', u'\u25e8': '$\\Elzsqfr$', u'\u25ea': '$\\Elzsqfse$', u'\u25ef': '$\\bigcirc$', u'\u2605': '{\\ding{72}}', u'\u2606': '{\\ding{73}}', u'\u260e': '{\\ding{37}}', u'\u261b': '{\\ding{42}}', u'\u261e': '{\\ding{43}}', u'\u263e': '{\\rightmoon}', u'\u263f': '{\\mercury}', u'\u2640': '{\\venus}', u'\u2642': '{\\male}', u'\u2643': '{\\jupiter}', u'\u2644': '{\\saturn}', u'\u2645': '{\\uranus}', u'\u2646': '{\\neptune}', u'\u2647': '{\\pluto}', u'\u2648': '{\\aries}', u'\u2649': '{\\taurus}', u'\u264a': '{\\gemini}', u'\u264b': '{\\cancer}', u'\u264c': '{\\leo}', u'\u264d': '{\\virgo}', u'\u264e': '{\\libra}', u'\u264f': '{\\scorpio}', u'\u2650': '{\\sagittarius}', u'\u2651': '{\\capricornus}', u'\u2652': '{\\aquarius}', u'\u2653': '{\\pisces}', u'\u2660': '{\\ding{171}}', u'\u2662': '$\\diamond$', u'\u2663': '{\\ding{168}}', u'\u2665': '{\\ding{170}}', u'\u2666': '{\\ding{169}}', u'\u2669': '{\\quarternote}', u'\u266a': '{\\eighthnote}', u'\u266d': '$\\flat$', u'\u266e': '$\\natural$', u'\u266f': '$\\sharp$', u'\u2701': '{\\ding{33}}', u'\u2702': '{\\ding{34}}', u'\u2703': '{\\ding{35}}', u'\u2704': '{\\ding{36}}', u'\u2706': '{\\ding{38}}', u'\u2707': '{\\ding{39}}', u'\u2708': '{\\ding{40}}', u'\u2709': '{\\ding{41}}', u'\u270c': '{\\ding{44}}', u'\u270d': '{\\ding{45}}', u'\u270e': '{\\ding{46}}', u'\u270f': '{\\ding{47}}', u'\u2710': '{\\ding{48}}', u'\u2711': '{\\ding{49}}', u'\u2712': '{\\ding{50}}', u'\u2713': '{\\ding{51}}', u'\u2714': '{\\ding{52}}', u'\u2715': '{\\ding{53}}', u'\u2716': '{\\ding{54}}', u'\u2717': '{\\ding{55}}', u'\u2718': '{\\ding{56}}', u'\u2719': '{\\ding{57}}', u'\u271a': '{\\ding{58}}', u'\u271b': '{\\ding{59}}', u'\u271c': '{\\ding{60}}', u'\u271d': '{\\ding{61}}', u'\u271e': '{\\ding{62}}', u'\u271f': '{\\ding{63}}', u'\u2720': '{\\ding{64}}', u'\u2721': '{\\ding{65}}', u'\u2722': '{\\ding{66}}', u'\u2723': '{\\ding{67}}', u'\u2724': '{\\ding{68}}', u'\u2725': '{\\ding{69}}', u'\u2726': '{\\ding{70}}', u'\u2727': 
'{\\ding{71}}', u'\u2729': '{\\ding{73}}', u'\u272a': '{\\ding{74}}', u'\u272b': '{\\ding{75}}', u'\u272c': '{\\ding{76}}', u'\u272d': '{\\ding{77}}', u'\u272e': '{\\ding{78}}', u'\u272f': '{\\ding{79}}', u'\u2730': '{\\ding{80}}', u'\u2731': '{\\ding{81}}', u'\u2732': '{\\ding{82}}', u'\u2733': '{\\ding{83}}', u'\u2734': '{\\ding{84}}', u'\u2735': '{\\ding{85}}', u'\u2736': '{\\ding{86}}', u'\u2737': '{\\ding{87}}', u'\u2738': '{\\ding{88}}', u'\u2739': '{\\ding{89}}', u'\u273a': '{\\ding{90}}', u'\u273b': '{\\ding{91}}', u'\u273c': '{\\ding{92}}', u'\u273d': '{\\ding{93}}', u'\u273e': '{\\ding{94}}', u'\u273f': '{\\ding{95}}', u'\u2740': '{\\ding{96}}', u'\u2741': '{\\ding{97}}', u'\u2742': '{\\ding{98}}', u'\u2743': '{\\ding{99}}', u'\u2744': '{\\ding{100}}', u'\u2745': '{\\ding{101}}', u'\u2746': '{\\ding{102}}', u'\u2747': '{\\ding{103}}', u'\u2748': '{\\ding{104}}', u'\u2749': '{\\ding{105}}', u'\u274a': '{\\ding{106}}', u'\u274b': '{\\ding{107}}', u'\u274d': '{\\ding{109}}', u'\u274f': '{\\ding{111}}', u'\u2750': '{\\ding{112}}', u'\u2751': '{\\ding{113}}', u'\u2752': '{\\ding{114}}', u'\u2756': '{\\ding{118}}', u'\u2758': '{\\ding{120}}', u'\u2759': '{\\ding{121}}', u'\u275a': '{\\ding{122}}', u'\u275b': '{\\ding{123}}', u'\u275c': '{\\ding{124}}', u'\u275d': '{\\ding{125}}', u'\u275e': '{\\ding{126}}', u'\u2761': '{\\ding{161}}', u'\u2762': '{\\ding{162}}', u'\u2763': '{\\ding{163}}', u'\u2764': '{\\ding{164}}', u'\u2765': '{\\ding{165}}', u'\u2766': '{\\ding{166}}', u'\u2767': '{\\ding{167}}', u'\u2776': '{\\ding{182}}', u'\u2777': '{\\ding{183}}', u'\u2778': '{\\ding{184}}', u'\u2779': '{\\ding{185}}', u'\u277a': '{\\ding{186}}', u'\u277b': '{\\ding{187}}', u'\u277c': '{\\ding{188}}', u'\u277d': '{\\ding{189}}', u'\u277e': '{\\ding{190}}', u'\u277f': '{\\ding{191}}', u'\u2780': '{\\ding{192}}', u'\u2781': '{\\ding{193}}', u'\u2782': '{\\ding{194}}', u'\u2783': '{\\ding{195}}', u'\u2784': '{\\ding{196}}', u'\u2785': '{\\ding{197}}', u'\u2786': 
'{\\ding{198}}', u'\u2787': '{\\ding{199}}', u'\u2788': '{\\ding{200}}', u'\u2789': '{\\ding{201}}', u'\u278a': '{\\ding{202}}', u'\u278b': '{\\ding{203}}', u'\u278c': '{\\ding{204}}', u'\u278d': '{\\ding{205}}', u'\u278e': '{\\ding{206}}', u'\u278f': '{\\ding{207}}', u'\u2790': '{\\ding{208}}', u'\u2791': '{\\ding{209}}', u'\u2792': '{\\ding{210}}', u'\u2793': '{\\ding{211}}', u'\u2794': '{\\ding{212}}', u'\u2798': '{\\ding{216}}', u'\u2799': '{\\ding{217}}', u'\u279a': '{\\ding{218}}', u'\u279b': '{\\ding{219}}', u'\u279c': '{\\ding{220}}', u'\u279d': '{\\ding{221}}', u'\u279e': '{\\ding{222}}', u'\u279f': '{\\ding{223}}', u'\u27a0': '{\\ding{224}}', u'\u27a1': '{\\ding{225}}', u'\u27a2': '{\\ding{226}}', u'\u27a3': '{\\ding{227}}', u'\u27a4': '{\\ding{228}}', u'\u27a5': '{\\ding{229}}', u'\u27a6': '{\\ding{230}}', u'\u27a7': '{\\ding{231}}', u'\u27a8': '{\\ding{232}}', u'\u27a9': '{\\ding{233}}', u'\u27aa': '{\\ding{234}}', u'\u27ab': '{\\ding{235}}', u'\u27ac': '{\\ding{236}}', u'\u27ad': '{\\ding{237}}', u'\u27ae': '{\\ding{238}}', u'\u27af': '{\\ding{239}}', u'\u27b1': '{\\ding{241}}', u'\u27b2': '{\\ding{242}}', u'\u27b3': '{\\ding{243}}', u'\u27b4': '{\\ding{244}}', u'\u27b5': '{\\ding{245}}', u'\u27b6': '{\\ding{246}}', u'\u27b7': '{\\ding{247}}', u'\u27b8': '{\\ding{248}}', u'\u27b9': '{\\ding{249}}', u'\u27ba': '{\\ding{250}}', u'\u27bb': '{\\ding{251}}', u'\u27bc': '{\\ding{252}}', u'\u27bd': '{\\ding{253}}', u'\u27be': '{\\ding{254}}', u'\u27f5': '$\\longleftarrow$', u'\u27f6': '$\\longrightarrow$', u'\u27f7': '$\\longleftrightarrow$', u'\u27f8': '$\\Longleftarrow$', u'\u27f9': '$\\Longrightarrow$', u'\u27fa': '$\\Longleftrightarrow$', u'\u27fc': '$\\longmapsto$', u'\u27ff': '$\\sim\\joinrel\\leadsto$', u'\u2905': '$\\ElsevierGlyph{E212}$', u'\u2912': '$\\UpArrowBar$', u'\u2913': '$\\DownArrowBar$', u'\u2923': '$\\ElsevierGlyph{E20C}$', u'\u2924': '$\\ElsevierGlyph{E20D}$', u'\u2925': '$\\ElsevierGlyph{E20B}$', u'\u2926': '$\\ElsevierGlyph{E20A}$', 
u'\u2927': '$\\ElsevierGlyph{E211}$', u'\u2928': '$\\ElsevierGlyph{E20E}$', u'\u2929': '$\\ElsevierGlyph{E20F}$', u'\u292a': '$\\ElsevierGlyph{E210}$', u'\u2933': '$\\ElsevierGlyph{E21C}$', u'\u2936': '$\\ElsevierGlyph{E21A}$', u'\u2937': '$\\ElsevierGlyph{E219}$', u'\u2940': '$\\Elolarr$', u'\u2941': '$\\Elorarr$', u'\u2942': '$\\ElzRlarr$', u'\u2944': '$\\ElzrLarr$', u'\u2947': '$\\Elzrarrx$', u'\u294e': '$\\LeftRightVector$', u'\u294f': '$\\RightUpDownVector$', u'\u2950': '$\\DownLeftRightVector$', u'\u2951': '$\\LeftUpDownVector$', u'\u2952': '$\\LeftVectorBar$', u'\u2953': '$\\RightVectorBar$', u'\u2954': '$\\RightUpVectorBar$', u'\u2955': '$\\RightDownVectorBar$', u'\u2956': '$\\DownLeftVectorBar$', u'\u2957': '$\\DownRightVectorBar$', u'\u2958': '$\\LeftUpVectorBar$', u'\u2959': '$\\LeftDownVectorBar$', u'\u295a': '$\\LeftTeeVector$', u'\u295b': '$\\RightTeeVector$', u'\u295c': '$\\RightUpTeeVector$', u'\u295d': '$\\RightDownTeeVector$', u'\u295e': '$\\DownLeftTeeVector$', u'\u295f': '$\\DownRightTeeVector$', u'\u2960': '$\\LeftUpTeeVector$', u'\u2961': '$\\LeftDownTeeVector$', u'\u296e': '$\\UpEquilibrium$', u'\u296f': '$\\ReverseUpEquilibrium$', u'\u2970': '$\\RoundImplies$', u'\u297c': '$\\ElsevierGlyph{E214}$', u'\u297d': '$\\ElsevierGlyph{E215}$', u'\u2980': '$\\Elztfnc$', u'\u2985': '$\\ElsevierGlyph{3018}$', u'\u2986': '$\\Elroang$', u'\u2993': '$<\\kern-0.58em($', u'\u2994': '$\\ElsevierGlyph{E291}$', u'\u2999': '$\\Elzddfnc$', u'\u299c': '$\\Angle$', u'\u29a0': '$\\Elzlpargt$', u'\u29b5': '$\\ElsevierGlyph{E260}$', u'\u29b6': '$\\ElsevierGlyph{E61B}$', u'\u29ca': '$\\ElzLap$', u'\u29cb': '$\\Elzdefas$', u'\u29cf': '$\\LeftTriangleBar$', u'\u29d0': '$\\RightTriangleBar$', u'\u29dc': '$\\ElsevierGlyph{E372}$', u'\u29eb': '$\\blacklozenge$', u'\u29f4': '$\\RuleDelayed$', u'\u2a04': '$\\Elxuplus$', u'\u2a05': '$\\ElzThr$', u'\u2a06': '$\\Elxsqcup$', u'\u2a07': '$\\ElzInf$', u'\u2a08': '$\\ElzSup$', u'\u2a0d': '$\\ElzCint$', u'\u2a0f': '$\\clockoint$', 
u'\u2a10': '$\\ElsevierGlyph{E395}$', u'\u2a16': '$\\sqrint$', u'\u2a25': '$\\ElsevierGlyph{E25A}$', u'\u2a2a': '$\\ElsevierGlyph{E25B}$', u'\u2a2d': '$\\ElsevierGlyph{E25C}$', u'\u2a2e': '$\\ElsevierGlyph{E25D}$', u'\u2a2f': '$\\ElzTimes$', u'\u2a34': '$\\ElsevierGlyph{E25E}$', u'\u2a35': '$\\ElsevierGlyph{E25E}$', u'\u2a3c': '$\\ElsevierGlyph{E259}$', u'\u2a3f': '$\\amalg$', u'\u2a53': '$\\ElzAnd$', u'\u2a54': '$\\ElzOr$', u'\u2a55': '$\\ElsevierGlyph{E36E}$', u'\u2a56': '$\\ElOr$', u'\u2a5e': '$\\perspcorrespond$', u'\u2a5f': '$\\Elzminhat$', u'\u2a63': '$\\ElsevierGlyph{225A}$', u'\u2a6e': '$\\stackrel{*}{=}$', u'\u2a75': '$\\Equal$', u'\u2a7d': '$\\leqslant$', u'\u2a7e': '$\\geqslant$', u'\u2a85': '$\\lessapprox$', u'\u2a86': '$\\gtrapprox$', u'\u2a87': '$\\lneq$', u'\u2a88': '$\\gneq$', u'\u2a89': '$\\lnapprox$', u'\u2a8a': '$\\gnapprox$', u'\u2a8b': '$\\lesseqqgtr$', u'\u2a8c': '$\\gtreqqless$', u'\u2a95': '$\\eqslantless$', u'\u2a96': '$\\eqslantgtr$', u'\u2a9d': '$\\Pisymbol{ppi020}{117}$', u'\u2a9e': '$\\Pisymbol{ppi020}{105}$', u'\u2aa1': '$\\NestedLessLess$', u'\u2aa2': '$\\NestedGreaterGreater$', u'\u2aaf': '$\\preceq$', u'\u2ab0': '$\\succeq$', u'\u2ab5': '$\\precneqq$', u'\u2ab6': '$\\succneqq$', u'\u2ab7': '$\\precapprox$', u'\u2ab8': '$\\succapprox$', u'\u2ab9': '$\\precnapprox$', u'\u2aba': '$\\succnapprox$', u'\u2ac5': '$\\subseteqq$', u'\u2ac6': '$\\supseteqq$', u'\u2acb': '$\\subsetneqq$', u'\u2acc': '$\\supsetneqq$', u'\u2aeb': '$\\ElsevierGlyph{E30D}$', u'\u2af6': '$\\Elztdcol$', u'\u2afd': '${{/}\\!\\!{/}}$', u'\u300a': '$\\ElsevierGlyph{300A}$', u'\u300b': '$\\ElsevierGlyph{300B}$', u'\u3018': '$\\ElsevierGlyph{3018}$', u'\u3019': '$\\ElsevierGlyph{3019}$', u'\u301a': '$\\openbracketleft$', u'\u301b': '$\\openbracketright$', u'\ufb00': '{ff}', u'\ufb01': '{fi}', u'\ufb02': '{fl}', u'\ufb03': '{ffi}', u'\ufb04': '{ffl}', u'\U0001d400': '$\\mathbf{A}$', u'\U0001d401': '$\\mathbf{B}$', u'\U0001d402': '$\\mathbf{C}$', u'\U0001d403': 
'$\\mathbf{D}$', u'\U0001d404': '$\\mathbf{E}$', u'\U0001d405': '$\\mathbf{F}$', u'\U0001d406': '$\\mathbf{G}$', u'\U0001d407': '$\\mathbf{H}$', u'\U0001d408': '$\\mathbf{I}$', u'\U0001d409': '$\\mathbf{J}$', u'\U0001d40a': '$\\mathbf{K}$', u'\U0001d40b': '$\\mathbf{L}$', u'\U0001d40c': '$\\mathbf{M}$', u'\U0001d40d': '$\\mathbf{N}$', u'\U0001d40e': '$\\mathbf{O}$', u'\U0001d40f': '$\\mathbf{P}$', u'\U0001d410': '$\\mathbf{Q}$', u'\U0001d411': '$\\mathbf{R}$', u'\U0001d412': '$\\mathbf{S}$', u'\U0001d413': '$\\mathbf{T}$', u'\U0001d414': '$\\mathbf{U}$', u'\U0001d415': '$\\mathbf{V}$', u'\U0001d416': '$\\mathbf{W}$', u'\U0001d417': '$\\mathbf{X}$', u'\U0001d418': '$\\mathbf{Y}$', u'\U0001d419': '$\\mathbf{Z}$', u'\U0001d41a': '$\\mathbf{a}$', u'\U0001d41b': '$\\mathbf{b}$', u'\U0001d41c': '$\\mathbf{c}$', u'\U0001d41d': '$\\mathbf{d}$', u'\U0001d41e': '$\\mathbf{e}$', u'\U0001d41f': '$\\mathbf{f}$', u'\U0001d420': '$\\mathbf{g}$', u'\U0001d421': '$\\mathbf{h}$', u'\U0001d422': '$\\mathbf{i}$', u'\U0001d423': '$\\mathbf{j}$', u'\U0001d424': '$\\mathbf{k}$', u'\U0001d425': '$\\mathbf{l}$', u'\U0001d426': '$\\mathbf{m}$', u'\U0001d427': '$\\mathbf{n}$', u'\U0001d428': '$\\mathbf{o}$', u'\U0001d429': '$\\mathbf{p}$', u'\U0001d42a': '$\\mathbf{q}$', u'\U0001d42b': '$\\mathbf{r}$', u'\U0001d42c': '$\\mathbf{s}$', u'\U0001d42d': '$\\mathbf{t}$', u'\U0001d42e': '$\\mathbf{u}$', u'\U0001d42f': '$\\mathbf{v}$', u'\U0001d430': '$\\mathbf{w}$', u'\U0001d431': '$\\mathbf{x}$', u'\U0001d432': '$\\mathbf{y}$', u'\U0001d433': '$\\mathbf{z}$', u'\U0001d434': '$\\mathsl{A}$', u'\U0001d435': '$\\mathsl{B}$', u'\U0001d436': '$\\mathsl{C}$', u'\U0001d437': '$\\mathsl{D}$', u'\U0001d438': '$\\mathsl{E}$', u'\U0001d439': '$\\mathsl{F}$', u'\U0001d43a': '$\\mathsl{G}$', u'\U0001d43b': '$\\mathsl{H}$', u'\U0001d43c': '$\\mathsl{I}$', u'\U0001d43d': '$\\mathsl{J}$', u'\U0001d43e': '$\\mathsl{K}$', u'\U0001d43f': '$\\mathsl{L}$', u'\U0001d440': '$\\mathsl{M}$', u'\U0001d441': 
'$\\mathsl{N}$', u'\U0001d442': '$\\mathsl{O}$', u'\U0001d443': '$\\mathsl{P}$', u'\U0001d444': '$\\mathsl{Q}$', u'\U0001d445': '$\\mathsl{R}$', u'\U0001d446': '$\\mathsl{S}$', u'\U0001d447': '$\\mathsl{T}$', u'\U0001d448': '$\\mathsl{U}$', u'\U0001d449': '$\\mathsl{V}$', u'\U0001d44a': '$\\mathsl{W}$', u'\U0001d44b': '$\\mathsl{X}$', u'\U0001d44c': '$\\mathsl{Y}$', u'\U0001d44d': '$\\mathsl{Z}$', u'\U0001d44e': '$\\mathsl{a}$', u'\U0001d44f': '$\\mathsl{b}$', u'\U0001d450': '$\\mathsl{c}$', u'\U0001d451': '$\\mathsl{d}$', u'\U0001d452': '$\\mathsl{e}$', u'\U0001d453': '$\\mathsl{f}$', u'\U0001d454': '$\\mathsl{g}$', u'\U0001d456': '$\\mathsl{i}$', u'\U0001d457': '$\\mathsl{j}$', u'\U0001d458': '$\\mathsl{k}$', u'\U0001d459': '$\\mathsl{l}$', u'\U0001d45a': '$\\mathsl{m}$', u'\U0001d45b': '$\\mathsl{n}$', u'\U0001d45c': '$\\mathsl{o}$', u'\U0001d45d': '$\\mathsl{p}$', u'\U0001d45e': '$\\mathsl{q}$', u'\U0001d45f': '$\\mathsl{r}$', u'\U0001d460': '$\\mathsl{s}$', u'\U0001d461': '$\\mathsl{t}$', u'\U0001d462': '$\\mathsl{u}$', u'\U0001d463': '$\\mathsl{v}$', u'\U0001d464': '$\\mathsl{w}$', u'\U0001d465': '$\\mathsl{x}$', u'\U0001d466': '$\\mathsl{y}$', u'\U0001d467': '$\\mathsl{z}$', u'\U0001d468': '$\\mathbit{A}$', u'\U0001d469': '$\\mathbit{B}$', u'\U0001d46a': '$\\mathbit{C}$', u'\U0001d46b': '$\\mathbit{D}$', u'\U0001d46c': '$\\mathbit{E}$', u'\U0001d46d': '$\\mathbit{F}$', u'\U0001d46e': '$\\mathbit{G}$', u'\U0001d46f': '$\\mathbit{H}$', u'\U0001d470': '$\\mathbit{I}$', u'\U0001d471': '$\\mathbit{J}$', u'\U0001d472': '$\\mathbit{K}$', u'\U0001d473': '$\\mathbit{L}$', u'\U0001d474': '$\\mathbit{M}$', u'\U0001d475': '$\\mathbit{N}$', u'\U0001d476': '$\\mathbit{O}$', u'\U0001d477': '$\\mathbit{P}$', u'\U0001d478': '$\\mathbit{Q}$', u'\U0001d479': '$\\mathbit{R}$', u'\U0001d47a': '$\\mathbit{S}$', u'\U0001d47b': '$\\mathbit{T}$', u'\U0001d47c': '$\\mathbit{U}$', u'\U0001d47d': '$\\mathbit{V}$', u'\U0001d47e': '$\\mathbit{W}$', u'\U0001d47f': '$\\mathbit{X}$', 
u'\U0001d480': '$\\mathbit{Y}$', u'\U0001d481': '$\\mathbit{Z}$', u'\U0001d482': '$\\mathbit{a}$', u'\U0001d483': '$\\mathbit{b}$', u'\U0001d484': '$\\mathbit{c}$', u'\U0001d485': '$\\mathbit{d}$', u'\U0001d486': '$\\mathbit{e}$', u'\U0001d487': '$\\mathbit{f}$', u'\U0001d488': '$\\mathbit{g}$', u'\U0001d489': '$\\mathbit{h}$', u'\U0001d48a': '$\\mathbit{i}$', u'\U0001d48b': '$\\mathbit{j}$', u'\U0001d48c': '$\\mathbit{k}$', u'\U0001d48d': '$\\mathbit{l}$', u'\U0001d48e': '$\\mathbit{m}$', u'\U0001d48f': '$\\mathbit{n}$', u'\U0001d490': '$\\mathbit{o}$', u'\U0001d491': '$\\mathbit{p}$', u'\U0001d492': '$\\mathbit{q}$', u'\U0001d493': '$\\mathbit{r}$', u'\U0001d494': '$\\mathbit{s}$', u'\U0001d495': '$\\mathbit{t}$', u'\U0001d496': '$\\mathbit{u}$', u'\U0001d497': '$\\mathbit{v}$', u'\U0001d498': '$\\mathbit{w}$', u'\U0001d499': '$\\mathbit{x}$', u'\U0001d49a': '$\\mathbit{y}$', u'\U0001d49b': '$\\mathbit{z}$', u'\U0001d49c': '$\\mathscr{A}$', u'\U0001d49e': '$\\mathscr{C}$', u'\U0001d49f': '$\\mathscr{D}$', u'\U0001d4a2': '$\\mathscr{G}$', u'\U0001d4a5': '$\\mathscr{J}$', u'\U0001d4a6': '$\\mathscr{K}$', u'\U0001d4a9': '$\\mathscr{N}$', u'\U0001d4aa': '$\\mathscr{O}$', u'\U0001d4ab': '$\\mathscr{P}$', u'\U0001d4ac': '$\\mathscr{Q}$', u'\U0001d4ae': '$\\mathscr{S}$', u'\U0001d4af': '$\\mathscr{T}$', u'\U0001d4b0': '$\\mathscr{U}$', u'\U0001d4b1': '$\\mathscr{V}$', u'\U0001d4b2': '$\\mathscr{W}$', u'\U0001d4b3': '$\\mathscr{X}$', u'\U0001d4b4': '$\\mathscr{Y}$', u'\U0001d4b5': '$\\mathscr{Z}$', u'\U0001d4b6': '$\\mathscr{a}$', u'\U0001d4b7': '$\\mathscr{b}$', u'\U0001d4b8': '$\\mathscr{c}$', u'\U0001d4b9': '$\\mathscr{d}$', u'\U0001d4bb': '$\\mathscr{f}$', u'\U0001d4bd': '$\\mathscr{h}$', u'\U0001d4be': '$\\mathscr{i}$', u'\U0001d4bf': '$\\mathscr{j}$', u'\U0001d4c0': '$\\mathscr{k}$', u'\U0001d4c1': '$\\mathscr{l}$', u'\U0001d4c2': '$\\mathscr{m}$', u'\U0001d4c3': '$\\mathscr{n}$', u'\U0001d4c5': '$\\mathscr{p}$', u'\U0001d4c6': '$\\mathscr{q}$', u'\U0001d4c7': 
'$\\mathscr{r}$', u'\U0001d4c8': '$\\mathscr{s}$', u'\U0001d4c9': '$\\mathscr{t}$', u'\U0001d4ca': '$\\mathscr{u}$', u'\U0001d4cb': '$\\mathscr{v}$', u'\U0001d4cc': '$\\mathscr{w}$', u'\U0001d4cd': '$\\mathscr{x}$', u'\U0001d4ce': '$\\mathscr{y}$', u'\U0001d4cf': '$\\mathscr{z}$', u'\U0001d4d0': '$\\mathmit{A}$', u'\U0001d4d1': '$\\mathmit{B}$', u'\U0001d4d2': '$\\mathmit{C}$', u'\U0001d4d3': '$\\mathmit{D}$', u'\U0001d4d4': '$\\mathmit{E}$', u'\U0001d4d5': '$\\mathmit{F}$', u'\U0001d4d6': '$\\mathmit{G}$', u'\U0001d4d7': '$\\mathmit{H}$', u'\U0001d4d8': '$\\mathmit{I}$', u'\U0001d4d9': '$\\mathmit{J}$', u'\U0001d4da': '$\\mathmit{K}$', u'\U0001d4db': '$\\mathmit{L}$', u'\U0001d4dc': '$\\mathmit{M}$', u'\U0001d4dd': '$\\mathmit{N}$', u'\U0001d4de': '$\\mathmit{O}$', u'\U0001d4df': '$\\mathmit{P}$', u'\U0001d4e0': '$\\mathmit{Q}$', u'\U0001d4e1': '$\\mathmit{R}$', u'\U0001d4e2': '$\\mathmit{S}$', u'\U0001d4e3': '$\\mathmit{T}$', u'\U0001d4e4': '$\\mathmit{U}$', u'\U0001d4e5': '$\\mathmit{V}$', u'\U0001d4e6': '$\\mathmit{W}$', u'\U0001d4e7': '$\\mathmit{X}$', u'\U0001d4e8': '$\\mathmit{Y}$', u'\U0001d4e9': '$\\mathmit{Z}$', u'\U0001d4ea': '$\\mathmit{a}$', u'\U0001d4eb': '$\\mathmit{b}$', u'\U0001d4ec': '$\\mathmit{c}$', u'\U0001d4ed': '$\\mathmit{d}$', u'\U0001d4ee': '$\\mathmit{e}$', u'\U0001d4ef': '$\\mathmit{f}$', u'\U0001d4f0': '$\\mathmit{g}$', u'\U0001d4f1': '$\\mathmit{h}$', u'\U0001d4f2': '$\\mathmit{i}$', u'\U0001d4f3': '$\\mathmit{j}$', u'\U0001d4f4': '$\\mathmit{k}$', u'\U0001d4f5': '$\\mathmit{l}$', u'\U0001d4f6': '$\\mathmit{m}$', u'\U0001d4f7': '$\\mathmit{n}$', u'\U0001d4f8': '$\\mathmit{o}$', u'\U0001d4f9': '$\\mathmit{p}$', u'\U0001d4fa': '$\\mathmit{q}$', u'\U0001d4fb': '$\\mathmit{r}$', u'\U0001d4fc': '$\\mathmit{s}$', u'\U0001d4fd': '$\\mathmit{t}$', u'\U0001d4fe': '$\\mathmit{u}$', u'\U0001d4ff': '$\\mathmit{v}$', u'\U0001d500': '$\\mathmit{w}$', u'\U0001d501': '$\\mathmit{x}$', u'\U0001d502': '$\\mathmit{y}$', u'\U0001d503': '$\\mathmit{z}$', 
u'\U0001d504': '$\\mathfrak{A}$', u'\U0001d505': '$\\mathfrak{B}$', u'\U0001d507': '$\\mathfrak{D}$', u'\U0001d508': '$\\mathfrak{E}$', u'\U0001d509': '$\\mathfrak{F}$', u'\U0001d50a': '$\\mathfrak{G}$', u'\U0001d50d': '$\\mathfrak{J}$', u'\U0001d50e': '$\\mathfrak{K}$', u'\U0001d50f': '$\\mathfrak{L}$', u'\U0001d510': '$\\mathfrak{M}$', u'\U0001d511': '$\\mathfrak{N}$', u'\U0001d512': '$\\mathfrak{O}$', u'\U0001d513': '$\\mathfrak{P}$', u'\U0001d514': '$\\mathfrak{Q}$', u'\U0001d516': '$\\mathfrak{S}$', u'\U0001d517': '$\\mathfrak{T}$', u'\U0001d518': '$\\mathfrak{U}$', u'\U0001d519': '$\\mathfrak{V}$', u'\U0001d51a': '$\\mathfrak{W}$', u'\U0001d51b': '$\\mathfrak{X}$', u'\U0001d51c': '$\\mathfrak{Y}$', u'\U0001d51e': '$\\mathfrak{a}$', u'\U0001d51f': '$\\mathfrak{b}$', u'\U0001d520': '$\\mathfrak{c}$', u'\U0001d521': '$\\mathfrak{d}$', u'\U0001d522': '$\\mathfrak{e}$', u'\U0001d523': '$\\mathfrak{f}$', u'\U0001d524': '$\\mathfrak{g}$', u'\U0001d525': '$\\mathfrak{h}$', u'\U0001d526': '$\\mathfrak{i}$', u'\U0001d527': '$\\mathfrak{j}$', u'\U0001d528': '$\\mathfrak{k}$', u'\U0001d529': '$\\mathfrak{l}$', u'\U0001d52a': '$\\mathfrak{m}$', u'\U0001d52b': '$\\mathfrak{n}$', u'\U0001d52c': '$\\mathfrak{o}$', u'\U0001d52d': '$\\mathfrak{p}$', u'\U0001d52e': '$\\mathfrak{q}$', u'\U0001d52f': '$\\mathfrak{r}$', u'\U0001d530': '$\\mathfrak{s}$', u'\U0001d531': '$\\mathfrak{t}$', u'\U0001d532': '$\\mathfrak{u}$', u'\U0001d533': '$\\mathfrak{v}$', u'\U0001d534': '$\\mathfrak{w}$', u'\U0001d535': '$\\mathfrak{x}$', u'\U0001d536': '$\\mathfrak{y}$', u'\U0001d537': '$\\mathfrak{z}$', u'\U0001d538': '$\\mathbb{A}$', u'\U0001d539': '$\\mathbb{B}$', u'\U0001d53b': '$\\mathbb{D}$', u'\U0001d53c': '$\\mathbb{E}$', u'\U0001d53d': '$\\mathbb{F}$', u'\U0001d53e': '$\\mathbb{G}$', u'\U0001d540': '$\\mathbb{I}$', u'\U0001d541': '$\\mathbb{J}$', u'\U0001d542': '$\\mathbb{K}$', u'\U0001d543': '$\\mathbb{L}$', u'\U0001d544': '$\\mathbb{M}$', u'\U0001d546': '$\\mathbb{O}$', u'\U0001d54a': 
'$\\mathbb{S}$', u'\U0001d54b': '$\\mathbb{T}$', u'\U0001d54c': '$\\mathbb{U}$', u'\U0001d54d': '$\\mathbb{V}$', u'\U0001d54e': '$\\mathbb{W}$', u'\U0001d54f': '$\\mathbb{X}$', u'\U0001d550': '$\\mathbb{Y}$', u'\U0001d552': '$\\mathbb{a}$', u'\U0001d553': '$\\mathbb{b}$', u'\U0001d554': '$\\mathbb{c}$', u'\U0001d555': '$\\mathbb{d}$', u'\U0001d556': '$\\mathbb{e}$', u'\U0001d557': '$\\mathbb{f}$', u'\U0001d558': '$\\mathbb{g}$', u'\U0001d559': '$\\mathbb{h}$', u'\U0001d55a': '$\\mathbb{i}$', u'\U0001d55b': '$\\mathbb{j}$', u'\U0001d55c': '$\\mathbb{k}$', u'\U0001d55d': '$\\mathbb{l}$', u'\U0001d55e': '$\\mathbb{m}$', u'\U0001d55f': '$\\mathbb{n}$', u'\U0001d560': '$\\mathbb{o}$', u'\U0001d561': '$\\mathbb{p}$', u'\U0001d562': '$\\mathbb{q}$', u'\U0001d563': '$\\mathbb{r}$', u'\U0001d564': '$\\mathbb{s}$', u'\U0001d565': '$\\mathbb{t}$', u'\U0001d566': '$\\mathbb{u}$', u'\U0001d567': '$\\mathbb{v}$', u'\U0001d568': '$\\mathbb{w}$', u'\U0001d569': '$\\mathbb{x}$', u'\U0001d56a': '$\\mathbb{y}$', u'\U0001d56b': '$\\mathbb{z}$', u'\U0001d56c': '$\\mathslbb{A}$', u'\U0001d56d': '$\\mathslbb{B}$', u'\U0001d56e': '$\\mathslbb{C}$', u'\U0001d56f': '$\\mathslbb{D}$', u'\U0001d570': '$\\mathslbb{E}$', u'\U0001d571': '$\\mathslbb{F}$', u'\U0001d572': '$\\mathslbb{G}$', u'\U0001d573': '$\\mathslbb{H}$', u'\U0001d574': '$\\mathslbb{I}$', u'\U0001d575': '$\\mathslbb{J}$', u'\U0001d576': '$\\mathslbb{K}$', u'\U0001d577': '$\\mathslbb{L}$', u'\U0001d578': '$\\mathslbb{M}$', u'\U0001d579': '$\\mathslbb{N}$', u'\U0001d57a': '$\\mathslbb{O}$', u'\U0001d57b': '$\\mathslbb{P}$', u'\U0001d57c': '$\\mathslbb{Q}$', u'\U0001d57d': '$\\mathslbb{R}$', u'\U0001d57e': '$\\mathslbb{S}$', u'\U0001d57f': '$\\mathslbb{T}$', u'\U0001d580': '$\\mathslbb{U}$', u'\U0001d581': '$\\mathslbb{V}$', u'\U0001d582': '$\\mathslbb{W}$', u'\U0001d583': '$\\mathslbb{X}$', u'\U0001d584': '$\\mathslbb{Y}$', u'\U0001d585': '$\\mathslbb{Z}$', u'\U0001d586': '$\\mathslbb{a}$', u'\U0001d587': '$\\mathslbb{b}$', 
u'\U0001d588': '$\\mathslbb{c}$', u'\U0001d589': '$\\mathslbb{d}$', u'\U0001d58a': '$\\mathslbb{e}$', u'\U0001d58b': '$\\mathslbb{f}$', u'\U0001d58c': '$\\mathslbb{g}$', u'\U0001d58d': '$\\mathslbb{h}$', u'\U0001d58e': '$\\mathslbb{i}$', u'\U0001d58f': '$\\mathslbb{j}$', u'\U0001d590': '$\\mathslbb{k}$', u'\U0001d591': '$\\mathslbb{l}$', u'\U0001d592': '$\\mathslbb{m}$', u'\U0001d593': '$\\mathslbb{n}$', u'\U0001d594': '$\\mathslbb{o}$', u'\U0001d595': '$\\mathslbb{p}$', u'\U0001d596': '$\\mathslbb{q}$', u'\U0001d597': '$\\mathslbb{r}$', u'\U0001d598': '$\\mathslbb{s}$', u'\U0001d599': '$\\mathslbb{t}$', u'\U0001d59a': '$\\mathslbb{u}$', u'\U0001d59b': '$\\mathslbb{v}$', u'\U0001d59c': '$\\mathslbb{w}$', u'\U0001d59d': '$\\mathslbb{x}$', u'\U0001d59e': '$\\mathslbb{y}$', u'\U0001d59f': '$\\mathslbb{z}$', u'\U0001d5a0': '$\\mathsf{A}$', u'\U0001d5a1': '$\\mathsf{B}$', u'\U0001d5a2': '$\\mathsf{C}$', u'\U0001d5a3': '$\\mathsf{D}$', u'\U0001d5a4': '$\\mathsf{E}$', u'\U0001d5a5': '$\\mathsf{F}$', u'\U0001d5a6': '$\\mathsf{G}$', u'\U0001d5a7': '$\\mathsf{H}$', u'\U0001d5a8': '$\\mathsf{I}$', u'\U0001d5a9': '$\\mathsf{J}$', u'\U0001d5aa': '$\\mathsf{K}$', u'\U0001d5ab': '$\\mathsf{L}$', u'\U0001d5ac': '$\\mathsf{M}$', u'\U0001d5ad': '$\\mathsf{N}$', u'\U0001d5ae': '$\\mathsf{O}$', u'\U0001d5af': '$\\mathsf{P}$', u'\U0001d5b0': '$\\mathsf{Q}$', u'\U0001d5b1': '$\\mathsf{R}$', u'\U0001d5b2': '$\\mathsf{S}$', u'\U0001d5b3': '$\\mathsf{T}$', u'\U0001d5b4': '$\\mathsf{U}$', u'\U0001d5b5': '$\\mathsf{V}$', u'\U0001d5b6': '$\\mathsf{W}$', u'\U0001d5b7': '$\\mathsf{X}$', u'\U0001d5b8': '$\\mathsf{Y}$', u'\U0001d5b9': '$\\mathsf{Z}$', u'\U0001d5ba': '$\\mathsf{a}$', u'\U0001d5bb': '$\\mathsf{b}$', u'\U0001d5bc': '$\\mathsf{c}$', u'\U0001d5bd': '$\\mathsf{d}$', u'\U0001d5be': '$\\mathsf{e}$', u'\U0001d5bf': '$\\mathsf{f}$', u'\U0001d5c0': '$\\mathsf{g}$', u'\U0001d5c1': '$\\mathsf{h}$', u'\U0001d5c2': '$\\mathsf{i}$', u'\U0001d5c3': '$\\mathsf{j}$', u'\U0001d5c4': '$\\mathsf{k}$', 
u'\U0001d5c5': '$\\mathsf{l}$', u'\U0001d5c6': '$\\mathsf{m}$', u'\U0001d5c7': '$\\mathsf{n}$', u'\U0001d5c8': '$\\mathsf{o}$', u'\U0001d5c9': '$\\mathsf{p}$', u'\U0001d5ca': '$\\mathsf{q}$', u'\U0001d5cb': '$\\mathsf{r}$', u'\U0001d5cc': '$\\mathsf{s}$', u'\U0001d5cd': '$\\mathsf{t}$', u'\U0001d5ce': '$\\mathsf{u}$', u'\U0001d5cf': '$\\mathsf{v}$', u'\U0001d5d0': '$\\mathsf{w}$', u'\U0001d5d1': '$\\mathsf{x}$', u'\U0001d5d2': '$\\mathsf{y}$', u'\U0001d5d3': '$\\mathsf{z}$', u'\U0001d5d4': '$\\mathsfbf{A}$', u'\U0001d5d5': '$\\mathsfbf{B}$', u'\U0001d5d6': '$\\mathsfbf{C}$', u'\U0001d5d7': '$\\mathsfbf{D}$', u'\U0001d5d8': '$\\mathsfbf{E}$', u'\U0001d5d9': '$\\mathsfbf{F}$', u'\U0001d5da': '$\\mathsfbf{G}$', u'\U0001d5db': '$\\mathsfbf{H}$', u'\U0001d5dc': '$\\mathsfbf{I}$', u'\U0001d5dd': '$\\mathsfbf{J}$', u'\U0001d5de': '$\\mathsfbf{K}$', u'\U0001d5df': '$\\mathsfbf{L}$', u'\U0001d5e0': '$\\mathsfbf{M}$', u'\U0001d5e1': '$\\mathsfbf{N}$', u'\U0001d5e2': '$\\mathsfbf{O}$', u'\U0001d5e3': '$\\mathsfbf{P}$', u'\U0001d5e4': '$\\mathsfbf{Q}$', u'\U0001d5e5': '$\\mathsfbf{R}$', u'\U0001d5e6': '$\\mathsfbf{S}$', u'\U0001d5e7': '$\\mathsfbf{T}$', u'\U0001d5e8': '$\\mathsfbf{U}$', u'\U0001d5e9': '$\\mathsfbf{V}$', u'\U0001d5ea': '$\\mathsfbf{W}$', u'\U0001d5eb': '$\\mathsfbf{X}$', u'\U0001d5ec': '$\\mathsfbf{Y}$', u'\U0001d5ed': '$\\mathsfbf{Z}$', u'\U0001d5ee': '$\\mathsfbf{a}$', u'\U0001d5ef': '$\\mathsfbf{b}$', u'\U0001d5f0': '$\\mathsfbf{c}$', u'\U0001d5f1': '$\\mathsfbf{d}$', u'\U0001d5f2': '$\\mathsfbf{e}$', u'\U0001d5f3': '$\\mathsfbf{f}$', u'\U0001d5f4': '$\\mathsfbf{g}$', u'\U0001d5f5': '$\\mathsfbf{h}$', u'\U0001d5f6': '$\\mathsfbf{i}$', u'\U0001d5f7': '$\\mathsfbf{j}$', u'\U0001d5f8': '$\\mathsfbf{k}$', u'\U0001d5f9': '$\\mathsfbf{l}$', u'\U0001d5fa': '$\\mathsfbf{m}$', u'\U0001d5fb': '$\\mathsfbf{n}$', u'\U0001d5fc': '$\\mathsfbf{o}$', u'\U0001d5fd': '$\\mathsfbf{p}$', u'\U0001d5fe': '$\\mathsfbf{q}$', u'\U0001d5ff': '$\\mathsfbf{r}$', u'\U0001d600': 
'$\\mathsfbf{s}$', u'\U0001d601': '$\\mathsfbf{t}$', u'\U0001d602': '$\\mathsfbf{u}$', u'\U0001d603': '$\\mathsfbf{v}$', u'\U0001d604': '$\\mathsfbf{w}$', u'\U0001d605': '$\\mathsfbf{x}$', u'\U0001d606': '$\\mathsfbf{y}$', u'\U0001d607': '$\\mathsfbf{z}$', u'\U0001d608': '$\\mathsfsl{A}$', u'\U0001d609': '$\\mathsfsl{B}$', u'\U0001d60a': '$\\mathsfsl{C}$', u'\U0001d60b': '$\\mathsfsl{D}$', u'\U0001d60c': '$\\mathsfsl{E}$', u'\U0001d60d': '$\\mathsfsl{F}$', u'\U0001d60e': '$\\mathsfsl{G}$', u'\U0001d60f': '$\\mathsfsl{H}$', u'\U0001d610': '$\\mathsfsl{I}$', u'\U0001d611': '$\\mathsfsl{J}$', u'\U0001d612': '$\\mathsfsl{K}$', u'\U0001d613': '$\\mathsfsl{L}$', u'\U0001d614': '$\\mathsfsl{M}$', u'\U0001d615': '$\\mathsfsl{N}$', u'\U0001d616': '$\\mathsfsl{O}$', u'\U0001d617': '$\\mathsfsl{P}$', u'\U0001d618': '$\\mathsfsl{Q}$', u'\U0001d619': '$\\mathsfsl{R}$', u'\U0001d61a': '$\\mathsfsl{S}$', u'\U0001d61b': '$\\mathsfsl{T}$', u'\U0001d61c': '$\\mathsfsl{U}$', u'\U0001d61d': '$\\mathsfsl{V}$', u'\U0001d61e': '$\\mathsfsl{W}$', u'\U0001d61f': '$\\mathsfsl{X}$', u'\U0001d620': '$\\mathsfsl{Y}$', u'\U0001d621': '$\\mathsfsl{Z}$', u'\U0001d622': '$\\mathsfsl{a}$', u'\U0001d623': '$\\mathsfsl{b}$', u'\U0001d624': '$\\mathsfsl{c}$', u'\U0001d625': '$\\mathsfsl{d}$', u'\U0001d626': '$\\mathsfsl{e}$', u'\U0001d627': '$\\mathsfsl{f}$', u'\U0001d628': '$\\mathsfsl{g}$', u'\U0001d629': '$\\mathsfsl{h}$', u'\U0001d62a': '$\\mathsfsl{i}$', u'\U0001d62b': '$\\mathsfsl{j}$', u'\U0001d62c': '$\\mathsfsl{k}$', u'\U0001d62d': '$\\mathsfsl{l}$', u'\U0001d62e': '$\\mathsfsl{m}$', u'\U0001d62f': '$\\mathsfsl{n}$', u'\U0001d630': '$\\mathsfsl{o}$', u'\U0001d631': '$\\mathsfsl{p}$', u'\U0001d632': '$\\mathsfsl{q}$', u'\U0001d633': '$\\mathsfsl{r}$', u'\U0001d634': '$\\mathsfsl{s}$', u'\U0001d635': '$\\mathsfsl{t}$', u'\U0001d636': '$\\mathsfsl{u}$', u'\U0001d637': '$\\mathsfsl{v}$', u'\U0001d638': '$\\mathsfsl{w}$', u'\U0001d639': '$\\mathsfsl{x}$', u'\U0001d63a': '$\\mathsfsl{y}$', 
u'\U0001d63b': '$\\mathsfsl{z}$', u'\U0001d63c': '$\\mathsfbfsl{A}$', u'\U0001d63d': '$\\mathsfbfsl{B}$', u'\U0001d63e': '$\\mathsfbfsl{C}$', u'\U0001d63f': '$\\mathsfbfsl{D}$', u'\U0001d640': '$\\mathsfbfsl{E}$', u'\U0001d641': '$\\mathsfbfsl{F}$', u'\U0001d642': '$\\mathsfbfsl{G}$', u'\U0001d643': '$\\mathsfbfsl{H}$', u'\U0001d644': '$\\mathsfbfsl{I}$', u'\U0001d645': '$\\mathsfbfsl{J}$', u'\U0001d646': '$\\mathsfbfsl{K}$', u'\U0001d647': '$\\mathsfbfsl{L}$', u'\U0001d648': '$\\mathsfbfsl{M}$', u'\U0001d649': '$\\mathsfbfsl{N}$', u'\U0001d64a': '$\\mathsfbfsl{O}$', u'\U0001d64b': '$\\mathsfbfsl{P}$', u'\U0001d64c': '$\\mathsfbfsl{Q}$', u'\U0001d64d': '$\\mathsfbfsl{R}$', u'\U0001d64e': '$\\mathsfbfsl{S}$', u'\U0001d64f': '$\\mathsfbfsl{T}$', u'\U0001d650': '$\\mathsfbfsl{U}$', u'\U0001d651': '$\\mathsfbfsl{V}$', u'\U0001d652': '$\\mathsfbfsl{W}$', u'\U0001d653': '$\\mathsfbfsl{X}$', u'\U0001d654': '$\\mathsfbfsl{Y}$', u'\U0001d655': '$\\mathsfbfsl{Z}$', u'\U0001d656': '$\\mathsfbfsl{a}$', u'\U0001d657': '$\\mathsfbfsl{b}$', u'\U0001d658': '$\\mathsfbfsl{c}$', u'\U0001d659': '$\\mathsfbfsl{d}$', u'\U0001d65a': '$\\mathsfbfsl{e}$', u'\U0001d65b': '$\\mathsfbfsl{f}$', u'\U0001d65c': '$\\mathsfbfsl{g}$', u'\U0001d65d': '$\\mathsfbfsl{h}$', u'\U0001d65e': '$\\mathsfbfsl{i}$', u'\U0001d65f': '$\\mathsfbfsl{j}$', u'\U0001d660': '$\\mathsfbfsl{k}$', u'\U0001d661': '$\\mathsfbfsl{l}$', u'\U0001d662': '$\\mathsfbfsl{m}$', u'\U0001d663': '$\\mathsfbfsl{n}$', u'\U0001d664': '$\\mathsfbfsl{o}$', u'\U0001d665': '$\\mathsfbfsl{p}$', u'\U0001d666': '$\\mathsfbfsl{q}$', u'\U0001d667': '$\\mathsfbfsl{r}$', u'\U0001d668': '$\\mathsfbfsl{s}$', u'\U0001d669': '$\\mathsfbfsl{t}$', u'\U0001d66a': '$\\mathsfbfsl{u}$', u'\U0001d66b': '$\\mathsfbfsl{v}$', u'\U0001d66c': '$\\mathsfbfsl{w}$', u'\U0001d66d': '$\\mathsfbfsl{x}$', u'\U0001d66e': '$\\mathsfbfsl{y}$', u'\U0001d66f': '$\\mathsfbfsl{z}$', u'\U0001d670': '$\\mathtt{A}$', u'\U0001d671': '$\\mathtt{B}$', u'\U0001d672': 
'$\\mathtt{C}$', u'\U0001d673': '$\\mathtt{D}$', u'\U0001d674': '$\\mathtt{E}$', u'\U0001d675': '$\\mathtt{F}$', u'\U0001d676': '$\\mathtt{G}$', u'\U0001d677': '$\\mathtt{H}$', u'\U0001d678': '$\\mathtt{I}$', u'\U0001d679': '$\\mathtt{J}$', u'\U0001d67a': '$\\mathtt{K}$', u'\U0001d67b': '$\\mathtt{L}$', u'\U0001d67c': '$\\mathtt{M}$', u'\U0001d67d': '$\\mathtt{N}$', u'\U0001d67e': '$\\mathtt{O}$', u'\U0001d67f': '$\\mathtt{P}$', u'\U0001d680': '$\\mathtt{Q}$', u'\U0001d681': '$\\mathtt{R}$', u'\U0001d682': '$\\mathtt{S}$', u'\U0001d683': '$\\mathtt{T}$', u'\U0001d684': '$\\mathtt{U}$', u'\U0001d685': '$\\mathtt{V}$', u'\U0001d686': '$\\mathtt{W}$', u'\U0001d687': '$\\mathtt{X}$', u'\U0001d688': '$\\mathtt{Y}$', u'\U0001d689': '$\\mathtt{Z}$', u'\U0001d68a': '$\\mathtt{a}$', u'\U0001d68b': '$\\mathtt{b}$', u'\U0001d68c': '$\\mathtt{c}$', u'\U0001d68d': '$\\mathtt{d}$', u'\U0001d68e': '$\\mathtt{e}$', u'\U0001d68f': '$\\mathtt{f}$', u'\U0001d690': '$\\mathtt{g}$', u'\U0001d691': '$\\mathtt{h}$', u'\U0001d692': '$\\mathtt{i}$', u'\U0001d693': '$\\mathtt{j}$', u'\U0001d694': '$\\mathtt{k}$', u'\U0001d695': '$\\mathtt{l}$', u'\U0001d696': '$\\mathtt{m}$', u'\U0001d697': '$\\mathtt{n}$', u'\U0001d698': '$\\mathtt{o}$', u'\U0001d699': '$\\mathtt{p}$', u'\U0001d69a': '$\\mathtt{q}$', u'\U0001d69b': '$\\mathtt{r}$', u'\U0001d69c': '$\\mathtt{s}$', u'\U0001d69d': '$\\mathtt{t}$', u'\U0001d69e': '$\\mathtt{u}$', u'\U0001d69f': '$\\mathtt{v}$', u'\U0001d6a0': '$\\mathtt{w}$', u'\U0001d6a1': '$\\mathtt{x}$', u'\U0001d6a2': '$\\mathtt{y}$', u'\U0001d6a3': '$\\mathtt{z}$', u'\U0001d6a8': '$\\mathbf{\\Alpha}$', u'\U0001d6a9': '$\\mathbf{\\Beta}$', u'\U0001d6aa': '$\\mathbf{\\Gamma}$', u'\U0001d6ab': '$\\mathbf{\\Delta}$', u'\U0001d6ac': '$\\mathbf{\\Epsilon}$', u'\U0001d6ad': '$\\mathbf{\\Zeta}$', u'\U0001d6ae': '$\\mathbf{\\Eta}$', u'\U0001d6af': '$\\mathbf{\\Theta}$', u'\U0001d6b0': '$\\mathbf{\\Iota}$', u'\U0001d6b1': '$\\mathbf{\\Kappa}$', u'\U0001d6b2': 
'$\\mathbf{\\Lambda}$', u'\U0001d6b3': '$M$', u'\U0001d6b4': '$N$', u'\U0001d6b5': '$\\mathbf{\\Xi}$', u'\U0001d6b6': '$O$', u'\U0001d6b7': '$\\mathbf{\\Pi}$', u'\U0001d6b8': '$\\mathbf{\\Rho}$', u'\U0001d6b9': '{\\mathbf{\\vartheta}}', u'\U0001d6ba': '$\\mathbf{\\Sigma}$', u'\U0001d6bb': '$\\mathbf{\\Tau}$', u'\U0001d6bc': '$\\mathbf{\\Upsilon}$', u'\U0001d6bd': '$\\mathbf{\\Phi}$', u'\U0001d6be': '$\\mathbf{\\Chi}$', u'\U0001d6bf': '$\\mathbf{\\Psi}$', u'\U0001d6c0': '$\\mathbf{\\Omega}$', u'\U0001d6c1': '$\\mathbf{\\nabla}$', u'\U0001d6c2': '$\\mathbf{\\Alpha}$', u'\U0001d6c3': '$\\mathbf{\\Beta}$', u'\U0001d6c4': '$\\mathbf{\\Gamma}$', u'\U0001d6c5': '$\\mathbf{\\Delta}$', u'\U0001d6c6': '$\\mathbf{\\Epsilon}$', u'\U0001d6c7': '$\\mathbf{\\Zeta}$', u'\U0001d6c8': '$\\mathbf{\\Eta}$', u'\U0001d6c9': '$\\mathbf{\\theta}$', u'\U0001d6ca': '$\\mathbf{\\Iota}$', u'\U0001d6cb': '$\\mathbf{\\Kappa}$', u'\U0001d6cc': '$\\mathbf{\\Lambda}$', u'\U0001d6cd': '$M$', u'\U0001d6ce': '$N$', u'\U0001d6cf': '$\\mathbf{\\Xi}$', u'\U0001d6d0': '$O$', u'\U0001d6d1': '$\\mathbf{\\Pi}$', u'\U0001d6d2': '$\\mathbf{\\Rho}$', u'\U0001d6d3': '$\\mathbf{\\varsigma}$', u'\U0001d6d4': '$\\mathbf{\\Sigma}$', u'\U0001d6d5': '$\\mathbf{\\Tau}$', u'\U0001d6d6': '$\\mathbf{\\Upsilon}$', u'\U0001d6d7': '$\\mathbf{\\Phi}$', u'\U0001d6d8': '$\\mathbf{\\Chi}$', u'\U0001d6d9': '$\\mathbf{\\Psi}$', u'\U0001d6da': '$\\mathbf{\\Omega}$', u'\U0001d6db': '$\\partial$', u'\U0001d6dc': '$\\in$', u'\U0001d6dd': '{\\mathbf{\\vartheta}}', u'\U0001d6de': '{\\mathbf{\\varkappa}}', u'\U0001d6df': '{\\mathbf{\\phi}}', u'\U0001d6e0': '{\\mathbf{\\varrho}}', u'\U0001d6e1': '{\\mathbf{\\varpi}}', u'\U0001d6e2': '$\\mathsl{\\Alpha}$', u'\U0001d6e3': '$\\mathsl{\\Beta}$', u'\U0001d6e4': '$\\mathsl{\\Gamma}$', u'\U0001d6e5': '$\\mathsl{\\Delta}$', u'\U0001d6e6': '$\\mathsl{\\Epsilon}$', u'\U0001d6e7': '$\\mathsl{\\Zeta}$', u'\U0001d6e8': '$\\mathsl{\\Eta}$', u'\U0001d6e9': '$\\mathsl{\\Theta}$', u'\U0001d6ea': 
'$\\mathsl{\\Iota}$', u'\U0001d6eb': '$\\mathsl{\\Kappa}$', u'\U0001d6ec': '$\\mathsl{\\Lambda}$', u'\U0001d6ed': '$M$', u'\U0001d6ee': '$N$', u'\U0001d6ef': '$\\mathsl{\\Xi}$', u'\U0001d6f0': '$O$', u'\U0001d6f1': '$\\mathsl{\\Pi}$', u'\U0001d6f2': '$\\mathsl{\\Rho}$', u'\U0001d6f3': '{\\mathsl{\\vartheta}}', u'\U0001d6f4': '$\\mathsl{\\Sigma}$', u'\U0001d6f5': '$\\mathsl{\\Tau}$', u'\U0001d6f6': '$\\mathsl{\\Upsilon}$', u'\U0001d6f7': '$\\mathsl{\\Phi}$', u'\U0001d6f8': '$\\mathsl{\\Chi}$', u'\U0001d6f9': '$\\mathsl{\\Psi}$', u'\U0001d6fa': '$\\mathsl{\\Omega}$', u'\U0001d6fb': '$\\mathsl{\\nabla}$', u'\U0001d6fc': '$\\mathsl{\\Alpha}$', u'\U0001d6fd': '$\\mathsl{\\Beta}$', u'\U0001d6fe': '$\\mathsl{\\Gamma}$', u'\U0001d6ff': '$\\mathsl{\\Delta}$', u'\U0001d700': '$\\mathsl{\\Epsilon}$', u'\U0001d701': '$\\mathsl{\\Zeta}$', u'\U0001d702': '$\\mathsl{\\Eta}$', u'\U0001d703': '$\\mathsl{\\Theta}$', u'\U0001d704': '$\\mathsl{\\Iota}$', u'\U0001d705': '$\\mathsl{\\Kappa}$', u'\U0001d706': '$\\mathsl{\\Lambda}$', u'\U0001d707': '$M$', u'\U0001d708': '$N$', u'\U0001d709': '$\\mathsl{\\Xi}$', u'\U0001d70a': '$O$', u'\U0001d70b': '$\\mathsl{\\Pi}$', u'\U0001d70c': '$\\mathsl{\\Rho}$', u'\U0001d70d': '$\\mathsl{\\varsigma}$', u'\U0001d70e': '$\\mathsl{\\Sigma}$', u'\U0001d70f': '$\\mathsl{\\Tau}$', u'\U0001d710': '$\\mathsl{\\Upsilon}$', u'\U0001d711': '$\\mathsl{\\Phi}$', u'\U0001d712': '$\\mathsl{\\Chi}$', u'\U0001d713': '$\\mathsl{\\Psi}$', u'\U0001d714': '$\\mathsl{\\Omega}$', u'\U0001d715': '$\\partial$', u'\U0001d716': '$\\in$', u'\U0001d717': '{\\mathsl{\\vartheta}}', u'\U0001d718': '{\\mathsl{\\varkappa}}', u'\U0001d719': '{\\mathsl{\\phi}}', u'\U0001d71a': '{\\mathsl{\\varrho}}', u'\U0001d71b': '{\\mathsl{\\varpi}}', u'\U0001d71c': '$\\mathbit{\\Alpha}$', u'\U0001d71d': '$\\mathbit{\\Beta}$', u'\U0001d71e': '$\\mathbit{\\Gamma}$', u'\U0001d71f': '$\\mathbit{\\Delta}$', u'\U0001d720': '$\\mathbit{\\Epsilon}$', u'\U0001d721': '$\\mathbit{\\Zeta}$', u'\U0001d722': 
'$\\mathbit{\\Eta}$', u'\U0001d723': '$\\mathbit{\\Theta}$', u'\U0001d724': '$\\mathbit{\\Iota}$', u'\U0001d725': '$\\mathbit{\\Kappa}$', u'\U0001d726': '$\\mathbit{\\Lambda}$', u'\U0001d727': '$M$', u'\U0001d728': '$N$', u'\U0001d729': '$\\mathbit{\\Xi}$', u'\U0001d72a': '$O$', u'\U0001d72b': '$\\mathbit{\\Pi}$', u'\U0001d72c': '$\\mathbit{\\Rho}$', u'\U0001d72d': '{\\mathbit{O}}', u'\U0001d72e': '$\\mathbit{\\Sigma}$', u'\U0001d72f': '$\\mathbit{\\Tau}$', u'\U0001d730': '$\\mathbit{\\Upsilon}$', u'\U0001d731': '$\\mathbit{\\Phi}$', u'\U0001d732': '$\\mathbit{\\Chi}$', u'\U0001d733': '$\\mathbit{\\Psi}$', u'\U0001d734': '$\\mathbit{\\Omega}$', u'\U0001d735': '$\\mathbit{\\nabla}$', u'\U0001d736': '$\\mathbit{\\Alpha}$', u'\U0001d737': '$\\mathbit{\\Beta}$', u'\U0001d738': '$\\mathbit{\\Gamma}$', u'\U0001d739': '$\\mathbit{\\Delta}$', u'\U0001d73a': '$\\mathbit{\\Epsilon}$', u'\U0001d73b': '$\\mathbit{\\Zeta}$', u'\U0001d73c': '$\\mathbit{\\Eta}$', u'\U0001d73d': '$\\mathbit{\\Theta}$', u'\U0001d73e': '$\\mathbit{\\Iota}$', u'\U0001d73f': '$\\mathbit{\\Kappa}$', u'\U0001d740': '$\\mathbit{\\Lambda}$', u'\U0001d741': '$M$', u'\U0001d742': '$N$', u'\U0001d743': '$\\mathbit{\\Xi}$', u'\U0001d744': '$O$', u'\U0001d745': '$\\mathbit{\\Pi}$', u'\U0001d746': '$\\mathbit{\\Rho}$', u'\U0001d747': '$\\mathbit{\\varsigma}$', u'\U0001d748': '$\\mathbit{\\Sigma}$', u'\U0001d749': '$\\mathbit{\\Tau}$', u'\U0001d74a': '$\\mathbit{\\Upsilon}$', u'\U0001d74b': '$\\mathbit{\\Phi}$', u'\U0001d74c': '$\\mathbit{\\Chi}$', u'\U0001d74d': '$\\mathbit{\\Psi}$', u'\U0001d74e': '$\\mathbit{\\Omega}$', u'\U0001d74f': '$\\partial$', u'\U0001d750': '$\\in$', u'\U0001d751': '{\\mathbit{\\vartheta}}', u'\U0001d752': '{\\mathbit{\\varkappa}}', u'\U0001d753': '{\\mathbit{\\phi}}', u'\U0001d754': '{\\mathbit{\\varrho}}', u'\U0001d755': '{\\mathbit{\\varpi}}', u'\U0001d756': '$\\mathsfbf{\\Alpha}$', u'\U0001d757': '$\\mathsfbf{\\Beta}$', u'\U0001d758': '$\\mathsfbf{\\Gamma}$', u'\U0001d759': 
'$\\mathsfbf{\\Delta}$', u'\U0001d75a': '$\\mathsfbf{\\Epsilon}$', u'\U0001d75b': '$\\mathsfbf{\\Zeta}$', u'\U0001d75c': '$\\mathsfbf{\\Eta}$', u'\U0001d75d': '$\\mathsfbf{\\Theta}$', u'\U0001d75e': '$\\mathsfbf{\\Iota}$', u'\U0001d75f': '$\\mathsfbf{\\Kappa}$', u'\U0001d760': '$\\mathsfbf{\\Lambda}$', u'\U0001d761': '$M$', u'\U0001d762': '$N$', u'\U0001d763': '$\\mathsfbf{\\Xi}$', u'\U0001d764': '$O$', u'\U0001d765': '$\\mathsfbf{\\Pi}$', u'\U0001d766': '$\\mathsfbf{\\Rho}$', u'\U0001d767': '{\\mathsfbf{\\vartheta}}', u'\U0001d768': '$\\mathsfbf{\\Sigma}$', u'\U0001d769': '$\\mathsfbf{\\Tau}$', u'\U0001d76a': '$\\mathsfbf{\\Upsilon}$', u'\U0001d76b': '$\\mathsfbf{\\Phi}$', u'\U0001d76c': '$\\mathsfbf{\\Chi}$', u'\U0001d76d': '$\\mathsfbf{\\Psi}$', u'\U0001d76e': '$\\mathsfbf{\\Omega}$', u'\U0001d76f': '$\\mathsfbf{\\nabla}$', u'\U0001d770': '$\\mathsfbf{\\Alpha}$', u'\U0001d771': '$\\mathsfbf{\\Beta}$', u'\U0001d772': '$\\mathsfbf{\\Gamma}$', u'\U0001d773': '$\\mathsfbf{\\Delta}$', u'\U0001d774': '$\\mathsfbf{\\Epsilon}$', u'\U0001d775': '$\\mathsfbf{\\Zeta}$', u'\U0001d776': '$\\mathsfbf{\\Eta}$', u'\U0001d777': '$\\mathsfbf{\\Theta}$', u'\U0001d778': '$\\mathsfbf{\\Iota}$', u'\U0001d779': '$\\mathsfbf{\\Kappa}$', u'\U0001d77a': '$\\mathsfbf{\\Lambda}$', u'\U0001d77b': '$M$', u'\U0001d77c': '$N$', u'\U0001d77d': '$\\mathsfbf{\\Xi}$', u'\U0001d77e': '$O$', u'\U0001d77f': '$\\mathsfbf{\\Pi}$', u'\U0001d780': '$\\mathsfbf{\\Rho}$', u'\U0001d781': '$\\mathsfbf{\\varsigma}$', u'\U0001d782': '$\\mathsfbf{\\Sigma}$', u'\U0001d783': '$\\mathsfbf{\\Tau}$', u'\U0001d784': '$\\mathsfbf{\\Upsilon}$', u'\U0001d785': '$\\mathsfbf{\\Phi}$', u'\U0001d786': '$\\mathsfbf{\\Chi}$', u'\U0001d787': '$\\mathsfbf{\\Psi}$', u'\U0001d788': '$\\mathsfbf{\\Omega}$', u'\U0001d789': '$\\partial$', u'\U0001d78a': '$\\in$', u'\U0001d78b': '{\\mathsfbf{\\vartheta}}', u'\U0001d78c': '{\\mathsfbf{\\varkappa}}', u'\U0001d78d': '{\\mathsfbf{\\phi}}', u'\U0001d78e': '{\\mathsfbf{\\varrho}}', 
u'\U0001d78f': '{\\mathsfbf{\\varpi}}', u'\U0001d790': '$\\mathsfbfsl{\\Alpha}$', u'\U0001d791': '$\\mathsfbfsl{\\Beta}$', u'\U0001d792': '$\\mathsfbfsl{\\Gamma}$', u'\U0001d793': '$\\mathsfbfsl{\\Delta}$', u'\U0001d794': '$\\mathsfbfsl{\\Epsilon}$', u'\U0001d795': '$\\mathsfbfsl{\\Zeta}$', u'\U0001d796': '$\\mathsfbfsl{\\Eta}$', u'\U0001d797': '$\\mathsfbfsl{\\vartheta}$', u'\U0001d798': '$\\mathsfbfsl{\\Iota}$', u'\U0001d799': '$\\mathsfbfsl{\\Kappa}$', u'\U0001d79a': '$\\mathsfbfsl{\\Lambda}$', u'\U0001d79b': '$M$', u'\U0001d79c': '$N$', u'\U0001d79d': '$\\mathsfbfsl{\\Xi}$', u'\U0001d79e': '$O$', u'\U0001d79f': '$\\mathsfbfsl{\\Pi}$', u'\U0001d7a0': '$\\mathsfbfsl{\\Rho}$', u'\U0001d7a1': '{\\mathsfbfsl{\\vartheta}}', u'\U0001d7a2': '$\\mathsfbfsl{\\Sigma}$', u'\U0001d7a3': '$\\mathsfbfsl{\\Tau}$', u'\U0001d7a4': '$\\mathsfbfsl{\\Upsilon}$', u'\U0001d7a5': '$\\mathsfbfsl{\\Phi}$', u'\U0001d7a6': '$\\mathsfbfsl{\\Chi}$', u'\U0001d7a7': '$\\mathsfbfsl{\\Psi}$', u'\U0001d7a8': '$\\mathsfbfsl{\\Omega}$', u'\U0001d7a9': '$\\mathsfbfsl{\\nabla}$', u'\U0001d7aa': '$\\mathsfbfsl{\\Alpha}$', u'\U0001d7ab': '$\\mathsfbfsl{\\Beta}$', u'\U0001d7ac': '$\\mathsfbfsl{\\Gamma}$', u'\U0001d7ad': '$\\mathsfbfsl{\\Delta}$', u'\U0001d7ae': '$\\mathsfbfsl{\\Epsilon}$', u'\U0001d7af': '$\\mathsfbfsl{\\Zeta}$', u'\U0001d7b0': '$\\mathsfbfsl{\\Eta}$', u'\U0001d7b1': '$\\mathsfbfsl{\\vartheta}$', u'\U0001d7b2': '$\\mathsfbfsl{\\Iota}$', u'\U0001d7b3': '$\\mathsfbfsl{\\Kappa}$', u'\U0001d7b4': '$\\mathsfbfsl{\\Lambda}$', u'\U0001d7b5': '$M$', u'\U0001d7b6': '$N$', u'\U0001d7b7': '$\\mathsfbfsl{\\Xi}$', u'\U0001d7b8': '$O$', u'\U0001d7b9': '$\\mathsfbfsl{\\Pi}$', u'\U0001d7ba': '$\\mathsfbfsl{\\Rho}$', u'\U0001d7bb': '$\\mathsfbfsl{\\varsigma}$', u'\U0001d7bc': '$\\mathsfbfsl{\\Sigma}$', u'\U0001d7bd': '$\\mathsfbfsl{\\Tau}$', u'\U0001d7be': '$\\mathsfbfsl{\\Upsilon}$', u'\U0001d7bf': '$\\mathsfbfsl{\\Phi}$', u'\U0001d7c0': '$\\mathsfbfsl{\\Chi}$', u'\U0001d7c1': '$\\mathsfbfsl{\\Psi}$', 
u'\U0001d7c2': '$\\mathsfbfsl{\\Omega}$', u'\U0001d7c3': '$\\partial$', u'\U0001d7c4': '$\\in$', u'\U0001d7c5': '{\\mathsfbfsl{\\vartheta}}', u'\U0001d7c6': '{\\mathsfbfsl{\\varkappa}}', u'\U0001d7c7': '{\\mathsfbfsl{\\phi}}', u'\U0001d7c8': '{\\mathsfbfsl{\\varrho}}', u'\U0001d7c9': '{\\mathsfbfsl{\\varpi}}', u'\U0001d7ce': '$\\mathbf{0}$', u'\U0001d7cf': '$\\mathbf{1}$', u'\U0001d7d0': '$\\mathbf{2}$', u'\U0001d7d1': '$\\mathbf{3}$', u'\U0001d7d2': '$\\mathbf{4}$', u'\U0001d7d3': '$\\mathbf{5}$', u'\U0001d7d4': '$\\mathbf{6}$', u'\U0001d7d5': '$\\mathbf{7}$', u'\U0001d7d6': '$\\mathbf{8}$', u'\U0001d7d7': '$\\mathbf{9}$', u'\U0001d7d8': '$\\mathbb{0}$', u'\U0001d7d9': '$\\mathbb{1}$', u'\U0001d7da': '$\\mathbb{2}$', u'\U0001d7db': '$\\mathbb{3}$', u'\U0001d7dc': '$\\mathbb{4}$', u'\U0001d7dd': '$\\mathbb{5}$', u'\U0001d7de': '$\\mathbb{6}$', u'\U0001d7df': '$\\mathbb{7}$', u'\U0001d7e0': '$\\mathbb{8}$', u'\U0001d7e1': '$\\mathbb{9}$', u'\U0001d7e2': '$\\mathsf{0}$', u'\U0001d7e3': '$\\mathsf{1}$', u'\U0001d7e4': '$\\mathsf{2}$', u'\U0001d7e5': '$\\mathsf{3}$', u'\U0001d7e6': '$\\mathsf{4}$', u'\U0001d7e7': '$\\mathsf{5}$', u'\U0001d7e8': '$\\mathsf{6}$', u'\U0001d7e9': '$\\mathsf{7}$', u'\U0001d7ea': '$\\mathsf{8}$', u'\U0001d7eb': '$\\mathsf{9}$', u'\U0001d7ec': '$\\mathsfbf{0}$', u'\U0001d7ed': '$\\mathsfbf{1}$', u'\U0001d7ee': '$\\mathsfbf{2}$', u'\U0001d7ef': '$\\mathsfbf{3}$', u'\U0001d7f0': '$\\mathsfbf{4}$', u'\U0001d7f1': '$\\mathsfbf{5}$', u'\U0001d7f2': '$\\mathsfbf{6}$', u'\U0001d7f3': '$\\mathsfbf{7}$', u'\U0001d7f4': '$\\mathsfbf{8}$', u'\U0001d7f5': '$\\mathsfbf{9}$', u'\U0001d7f6': '$\\mathtt{0}$', u'\U0001d7f7': '$\\mathtt{1}$', u'\U0001d7f8': '$\\mathtt{2}$', u'\U0001d7f9': '$\\mathtt{3}$', u'\U0001d7fa': '$\\mathtt{4}$', u'\U0001d7fb': '$\\mathtt{5}$', u'\U0001d7fc': '$\\mathtt{6}$', u'\U0001d7fd': '$\\mathtt{7}$', u'\U0001d7fe': '$\\mathtt{8}$', u'\U0001d7ff': '$\\mathtt{9}$'}
garinh/cs
docs/support/docutils/writers/unicode_latex.py
Python
lgpl-2.1
73,667
[ "Bowtie" ]
fd96364111c87e34b32acac74798fa861fff08b9f371c85715a55d252f35350b
from builtins import range, str import numpy as np from scipy.special import erf try: from scipy.weave import inline except ImportError as e: try: from weave import inline except ImportError as e: pass from peri.special import functions from peri.comp import Component from peri.util import Tile, cdd, listify, delistify # maximum number of iterations to get an exact volume MAX_VOLUME_ITERATIONS = 10 #============================================================================= # Superclass for collections of particles #============================================================================= class PlatonicParticlesCollection(Component): def __init__(self, pos, shape=None, param_prefix='sph', category='obj', support_pad=4, float_precision=np.float64): """ Parent class for a large collection of particles, such as spheres or points or ellipsoids or rods. This class is good for a collection of objects which each have a position as well as (possibly) some other parameters, like particle radius, aspect ratio, or brightness. Its .get() method returns a field of the drawn particles, selected on the current tile. Any daughter classes need the following methods: * _draw_particle * _update_type * setup_variables * get_values * set_values * add_particle * remove_particle In addition, the following methods should be modified for particles with more parameters than just positions: * _drawargs * _tile * param_particle * exports * _p2i If you have a few objects to group, like 2 or 3 slabs, group them with a `peri.comp.ComponentCollection` instead. Parameters ---------- pos : ndarray [N,d] Initial positions of the particles. Re-cast as float internally shape : ``peri.util.Tile``, optional Shape of the field over which to draw the platonic spheres. Default is None. param_prefix : string, optional Prefix for the particle parameter names. Default is `'sph'` category : string, optional Category, as in comp.Component. Default is `'obj'`. 
support_pad : Int, optional How much to pad the boundary of particles when calculating the support so that particles do not leak out the edges. Default is 4 float_precision : numpy float datatype, optional One of numpy.float16, numpy.float32, numpy.float64; precision for precomputed arrays. Default is np.float64; make it 16 or 32 to save memory. """ if pos.ndim != 2: raise ValueError('pos must be of shape (N,d)') self.category = category self.support_pad = support_pad self.pos = pos.astype('float') self.param_prefix = param_prefix if float_precision not in (np.float64, np.float32, np.float16): raise ValueError('float_precision must be one of np.float64, ' + 'np.float32, np.float16') self.float_precision = float_precision self.shape = shape self.setup_variables() if self.shape: self.inner = self.shape.copy() self.tile = self.inner.copy() self.initialize() def _draw_particle(self, pos, sign=1): """ Updates ``self.particles`` by drawing a particle at position ``pos``, with possible additional unnamed arguments between ``pos`` and ``sign``. If ``sign`` is -1, un-draws the particle instead. To be able to fit this component in a model, _draw_particle must create an image that is numerically continuous as pos changes -- i.e. the edge of the particle must alias smoothly to 0. """ raise NotImplementedError('Implement in subclasss') def _update_type(self, params): """ Given a list of parameters, returns a bool of whether or not any of the parameters require a global update, and a list of particle indices which are included in ``params``, e.g. ``return doglobal, particles`` """ raise NotImplementedError('Implement in subclasss') def setup_variables(self): """Creates an ordered list of parameters and stores in self._params""" raise NotImplementedError('Implement in subclass') def get_values(self, params): """ Returns a util.delisty-d assortment of values and parameters, e.g. 
(get values for the parameters, both particle positions and globals) (return delistify(values, params)) """ #FIXME seems stupid that this can't be done intelligently, like with #a Component's dict raise NotImplementedError('Implement in subclasss') def set_values(self, params, values): """Sets the parameters and values""" #FIXME seems stupid that this can't be done intelligently, like with #a Component's dict raise NotImplementedError('Implement in subclasss') def add_particle(pos): raise NotImplementedError('Implement in subclasss') def remove_particle(pos): raise NotImplementedError('Implement in subclasss') def _drawargs(self): """ Returns a list of arguments for self._draw_particle, of the same length as `self.pos`. For example, if drawing a sphere, _drawargs could return a list of radii. """ return [[] for p in self.pos] def _tile(self, n): """Get the update tile surrounding particle `n` """ pos = self._trans(self.pos[n]) return Tile(pos, pos).pad(self.support_pad) def param_particle(self, ind): return self.param_particle_pos(ind) def exports(self): return [ self.add_particle, self.remove_particle, self.closest_particle, self.get_positions ] def _p2i(self, param): """ Parameter to indices, returns (coord, index), e.g. 
for a pos pos : ('x', 100) """ g = param.split('-') if len(g) == 3: return g[2], int(g[1]) else: raise ValueError('`param` passed as incorrect format') def initialize(self): """Start from scratch and initialize all objects / draw self.particles""" self.particles = np.zeros(self.shape.shape, dtype=self.float_precision) for p0, arg0 in zip(self.pos, self._drawargs()): self._draw_particle(p0, *listify(arg0)) def get(self): return self.particles[self.tile.slicer] @property def N(self): return self.pos.shape[0] def _vps(self, inds): """Clips a list of inds to be on [0, self.N]""" return [j for j in inds if j >= 0 and j < self.N] def param_positions(self): """ Return params of all positions """ return self.param_particle_pos(list(range(self.N))) def param_particle_pos(self, ind): """ Get position of one or more particles """ #FIXME assumes 3D and x,y,z labels right now.... ind = self._vps(listify(ind)) return [self._i2p(i, j) for i in ind for j in ['z', 'y', 'x']] def _trans(self, pos): return pos + self.inner.l def get_positions(self): return self.pos.copy() def closest_particle(self, x): """ Get the index of the particle closest to vector `x` """ return (((self.pos - x)**2).sum(axis=-1)).argmin() @property def params(self): return self._params @property def values(self): return self.get_values(self._params) def _i2p(self, ind, coord): """ Translate index info to parameter name """ return '-'.join([self.param_prefix, str(ind), coord]) def get_update_tile(self, params, values): """ Get the amount of support size required for a particular update.""" doglobal, particles = self._update_type(params) if doglobal: return self.shape.copy() # 1) store the current parameters of interest values0 = self.get_values(params) # 2) calculate the current tileset tiles0 = [self._tile(n) for n in particles] # 3) update to newer parameters and calculate tileset self.set_values(params, values) tiles1 = [self._tile(n) for n in particles] # 4) revert parameters & return union of all tiles 
self.set_values(params, values0) return Tile.boundingtile(tiles0 + tiles1) def update(self, params, values): """ Update the particles field given new parameter values """ #1. Figure out if we're going to do a global update, in which # case we just draw from scratch. global_update, particles = self._update_type(params) # if we are doing a global update, everything must change, so # starting fresh will be faster instead of add subtract if global_update: self.set_values(params, values) self.initialize() return # otherwise, update individual particles. delete the current versions # of the particles update the particles, and redraw them anew at the # places given by (params, values) oldargs = self._drawargs() for n in particles: self._draw_particle(self.pos[n], *listify(oldargs[n]), sign=-1) self.set_values(params, values) newargs = self._drawargs() for n in particles: self._draw_particle(self.pos[n], *listify(newargs[n]), sign=+1) def __str__(self): return "{} N={}".format(self.__class__.__name__, self.N) #============================================================================= # Forms of the platonic sphere interpolation function #============================================================================= def norm(a): return np.sqrt((a**2).sum(axis=-1)) def inner(r, p, a, zscale=1.0): eps = np.array([1,1,1])*1e-8 s = np.array([zscale, 1.0, 1.0]) d = (r-p-eps)*s n = norm(d) dhat = d / n[...,None] o = norm((d - a*dhat)/s) return o * np.sign(n - a) def sphere_bool(dr, a, alpha): return 1.0*(dr < 0) def sphere_lerp(dr, a, alpha): """ Linearly interpolate the pixels for the platonic object """ return (1-np.clip((dr+alpha) / (2*alpha), 0, 1)) def sphere_logistic(dr, a, alpha): """ Classic logistic interpolation """ return 1.0/(1.0 + np.exp(alpha*dr)) def sphere_triangle_cdf(dr, a, alpha): """ Cumulative distribution function for the traingle distribution """ p0 = (dr+alpha)**2/(2*alpha**2)*(0 > dr)*(dr>-alpha) p1 = 
1*(dr>0)-(alpha-dr)**2/(2*alpha**2)*(0<dr)*(dr<alpha) return (1-np.clip(p0+p1, 0, 1)) def sphere_analytical_gaussian(dr, a, alpha=0.2765): """ Analytically calculate the sphere's functional form by convolving the Heavyside function with first order approximation to the sinc, a Gaussian. The alpha parameters controls the width of the approximation -- should be 1, but is fit to be roughly 0.2765 """ term1 = 0.5*(erf((dr+2*a)/(alpha*np.sqrt(2))) + erf(-dr/(alpha*np.sqrt(2)))) term2 = np.sqrt(0.5/np.pi)*(alpha/(dr+a+1e-10)) * ( np.exp(-0.5*dr**2/alpha**2) - np.exp(-0.5*(dr+2*a)**2/alpha**2) ) return term1 - term2 def sphere_analytical_gaussian_trim(dr, a, alpha=0.2765, cut=1.6): """ See sphere_analytical_gaussian_exact. I trimmed to terms from the functional form that are essentially zero (1e-8) for r0 > cut (~1.5), a fine approximation for these platonic anyway. """ m = np.abs(dr) <= cut # only compute on the relevant scales rr = dr[m] t = -rr/(alpha*np.sqrt(2)) q = 0.5*(1 + erf(t)) - np.sqrt(0.5/np.pi)*(alpha/(rr+a+1e-10)) * np.exp(-t*t) # fill in the grid, inside the interpolation and outside where values are constant ans = 0*dr ans[m] = q ans[dr > cut] = 0 ans[dr < -cut] = 1 return ans def sphere_analytical_gaussian_fast(dr, a, alpha=0.2765, cut=1.20): """ See sphere_analytical_gaussian_trim, but implemented in C with fast erf and exp approximations found at Abramowitz and Stegun: Handbook of Mathematical Functions A Fast, Compact Approximation of the Exponential Function The default cut 1.25 was chosen based on the accuracy of fast_erf """ code = """ double coeff1 = 1.0/(alpha*sqrt(2.0)); double coeff2 = sqrt(0.5/pi)*alpha; for (int i=0; i<N; i++){ double dri = dr[i]; if (dri < cut && dri > -cut){ double t = -dri*coeff1; ans[i] = 0.5*(1+fast_erf(t)) - coeff2/(dri+a+1e-10) * fast_exp(-t*t); } else { ans[i] = 0.0*(dri > cut) + 1.0*(dri < -cut); } } """ shape = r.shape r = r.flatten() N = self.N ans = r*0 pi = np.pi inline(code, arg_names=['dr', 'a', 'alpha', 'cut', 
'ans', 'pi', 'N'], support_code=functions, verbose=0) return ans.reshape(shape) def sphere_constrained_cubic(dr, a, alpha): """ Sphere generated by a cubic interpolant constrained to be (1,0) on (r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction. """ sqrt3 = np.sqrt(3) b_coeff = a*0.5/sqrt3*(1 - 0.6*sqrt3*alpha)/(0.15 + a*a) rscl = np.clip(dr, -0.5*sqrt3, 0.5*sqrt3) a, d = rscl + 0.5*sqrt3, rscl - 0.5*sqrt3 return alpha*d*a*rscl + b_coeff*d*a - d/sqrt3 try: sphere_analytical_gaussian_fast(np.linspace(0,10,10), 5.0) except Exception as e: sphere_analytical_gaussian_fast = sphere_analytical_gaussian_trim def exact_volume_sphere(rvec, pos, radius, zscale=1.0, volume_error=1e-5, function=sphere_analytical_gaussian, max_radius_change=1e-2, args=()): """ Perform an iterative method to calculate the effective sphere that perfectly (up to the volume_error) conserves volume. Return the resulting image """ vol_goal = 4./3*np.pi*radius**3 / zscale rprime = radius dr = inner(rvec, pos, rprime, zscale=zscale) t = function(dr, rprime, *args) for i in range(MAX_VOLUME_ITERATIONS): vol_curr = np.abs(t.sum()) if np.abs(vol_goal - vol_curr)/vol_goal < volume_error: break rprime = rprime + 1.0*(vol_goal - vol_curr) / (4*np.pi*rprime**2) if np.abs(rprime - radius)/radius > max_radius_change: break dr = inner(rvec, pos, rprime, zscale=zscale) t = function(dr, rprime, *args) return t #============================================================================= # Actual sphere collection (and slab) #============================================================================= class PlatonicSpheresCollection(PlatonicParticlesCollection): def __init__(self, pos, rad, shape=None, zscale=1.0, support_pad=4, method='exact-gaussian-fast', alpha=None, user_method=None, exact_volume=True, volume_error=1e-5, max_radius_change=1e-2, param_prefix='sph', grouping='particle', category='obj', float_precision=np.float64): """ A collection of spheres in real-space with positions 
and radii, drawn not necessarily on a uniform grid (i.e. scale factor associated with z-direction). There are many ways to draw the sphere, currently supported methods can be one of:: [ 'bool', 'lerp', 'logistic', 'triangle', 'constrained-cubic', 'exact-gaussian', 'exact-gaussian-trim', 'exact-gaussian-fast', 'user-defined' ] Parameters ---------- pos : ndarray [N,3] Initial positions of the spheres rad : ndarray [N] or float Initial radii of the spheres shape : tuple Shape of the field over which to draw the platonic spheres zscale : float scaling of z-pixels in the platonic image support_pad : int how much to pad the boundary of particles when calculating support so that there is not more contribution method : string The sphere drawing function to use, see above. alpha : float Parameter supplied to sphere drawing function, set to value to override default value user_method : tuple (function, parameters) Provide your own sphere function to the drawing method. First element of tuple is function with call signature `func(dr, a, *args)` where the second element is the `*args` that are not the distance to edge (dr) or particles radius (a). `method` must be set to 'user-defined'. exact_volume : boolean whether to iterate effective particle size until exact volume (within volume_error) is achieved volume_error : float relative volume error tolerance in iteration steps max_radius_change : float maximum relative radius change allowed during iteration (due to edge particles and other confounding factors) grouping : string Either 'particle' or 'parameter' parameter grouping. If 'particle' then grouped by xyza,xyza if 'parameter' then xyz,xyz,a,a float_precision : numpy float datatype One of numpy.float16, numpy.float32, numpy.float64; precision for precomputed arrays. Default is np.float64; make it 16 or 32 to save memory. 
""" if isinstance(rad, (float, int)): rad = rad*np.ones(pos.shape[0]) if rad.size != pos.shape[0]: raise ValueError('pos, rad must have the same number of particles.') if pos.ndim != 2: raise ValueError('pos must be of shape (N,3)') self.rad = rad.astype('float') self.zscale = zscale self.exact_volume = exact_volume self.volume_error = volume_error self.max_radius_change = max_radius_change self.user_method = user_method self.grouping = grouping self.set_draw_method(method=method, alpha=alpha, user_method=user_method) super(PlatonicSpheresCollection, self).__init__(pos=pos, shape=shape, param_prefix=param_prefix, category=category, support_pad= support_pad, float_precision=float_precision) def _drawargs(self): return self.rad def setup_variables(self): self._params = [] if self.grouping == 'parameter': for i, p0 in enumerate(self.pos): self._params.extend([self._i2p(i, c) for c in ['z','y','x']]) for i, r0 in enumerate(self.rad): self._params.extend([self._i2p(i, c) for c in ['a']]) else: for i, (p0, r0) in enumerate(zip(self.pos, self.rad)): self._params.extend([self._i2p(i, c) for c in ['z','y','x','a']]) self._params += ['zscale'] def get_values(self, params): values = [] for p in listify(params): typ, ind = self._p2i(p) if typ == 'zscale': values.append(self.zscale) elif typ == 'x': values.append(self.pos[ind][2]) elif typ == 'y': values.append(self.pos[ind][1]) elif typ == 'z': values.append(self.pos[ind][0]) elif typ == 'a': values.append(self.rad[ind]) return delistify(values, params) def set_values(self, params, values): for p,v in zip(listify(params), listify(values)): typ, ind = self._p2i(p) if typ == 'zscale': self.zscale = v elif typ == 'x': self.pos[ind][2] = v elif typ == 'y': self.pos[ind][1] = v elif typ == 'z': self.pos[ind][0] = v elif typ == 'a': self.rad[ind] = v def set_draw_method(self, method, alpha=None, user_method=None): self.methods = [ 'lerp', 'logistic', 'triangle', 'constrained-cubic', 'exact-gaussian', 'exact-gaussian-trim', 
'exact-gaussian-fast', 'user-defined' ] self.sphere_functions = { 'bool': sphere_bool, 'lerp': sphere_lerp, 'logistic': sphere_logistic, 'triangle': sphere_triangle_cdf, 'exact-gaussian': sphere_analytical_gaussian, 'exact-gaussian-trim': sphere_analytical_gaussian_trim, 'exact-gaussian-fast': sphere_analytical_gaussian_fast, 'constrained-cubic': sphere_constrained_cubic } self.alpha_defaults = { 'bool': 0, 'lerp': 0.4539, 'logistic': 6.5, 'triangle': 0.6618, 'exact-gaussian': 0.27595, 'exact-gaussian-trim': 0.27595, 'exact-gaussian-fast': 0.27595, 'constrained-cubic': 0.84990, } if user_method: self.sphere_functions['user-defined'] = user_method[0] self.alpha_defaults['user-defined'] = user_method[1] self.method = method if alpha is not None: self.alpha = tuple(listify(alpha)) else: self.alpha = tuple(listify(self.alpha_defaults[self.method])) def _draw_particle(self, pos, rad, sign=1): # we can't draw 0 radius particles correctly, abort if rad == 0.0: return # translate to its actual position in the padded image pos = self._trans(pos) p = np.round(pos) r = np.round(np.array([1.0/self.zscale,1,1])*np.ceil(rad)+self.support_pad) tile = Tile(p-r, p+r, 0, self.shape.shape) rvec = tile.coords(form='vector') # if required, do an iteration to find the best radius to produce # the goal volume as given by the particular goal radius if self.exact_volume: t = sign*exact_volume_sphere( rvec, pos, rad, zscale=self.zscale, volume_error=self.volume_error, function=self.sphere_functions[self.method], args=self.alpha, max_radius_change=self.max_radius_change ) else: # calculate the anti-aliasing according to the interpolation type dr = inner(rvec, pos, rad, zscale=self.zscale) t = sign*self.sphere_functions[self.method](dr, rad, *self.alpha) self.particles[tile.slicer] += t def param_radii(self): """ Return params of all radii """ return [self._i2p(i, 'a') for i in range(self.N)] def param_particle(self, ind): """ Get position and radius of one or more particles """ ind = 
self._vps(listify(ind)) return [self._i2p(i, j) for i in ind for j in ['z', 'y', 'x', 'a']] def param_particle_pos(self, ind): """ Get position of one or more particles """ ind = self._vps(listify(ind)) return [self._i2p(i, j) for i in ind for j in ['z', 'y', 'x']] def param_particle_rad(self, ind): """ Get radius of one or more particles """ ind = self._vps(listify(ind)) return [self._i2p(i, 'a') for i in ind] def add_particle(self, pos, rad): """ Add a particle or list of particles given by a list of positions and radii, both need to be array-like. Parameters ---------- pos : array-like [N, 3] Positions of all new particles rad : array-like [N] Corresponding radii of new particles Returns ------- inds : N-element numpy.ndarray. Indices of the added particles. """ rad = listify(rad) # add some zero mass particles to the list (same as not having these # particles in the image, which is true at this moment) inds = np.arange(self.N, self.N+len(rad)) self.pos = np.vstack([self.pos, pos]) self.rad = np.hstack([self.rad, np.zeros(len(rad))]) # update the parameters globally self.setup_variables() self.trigger_parameter_change() # now request a drawing of the particle plz params = self.param_particle_rad(inds) self.trigger_update(params, rad) return inds def remove_particle(self, inds): """ Remove the particle at index `inds`, may be a list. Returns [3,N], [N] element numpy.ndarray of pos, rad. """ if self.rad.shape[0] == 0: return inds = listify(inds) # Here's the game plan: # 1. get all positions and sizes of particles that we will be # removing (to return to user) # 2. redraw those particles to 0.0 radius # 3. 
remove the particles and trigger changes # However, there is an issue -- if there are two particles at opposite # ends of the image, it will be significantly slower than usual pos = self.pos[inds].copy() rad = self.rad[inds].copy() self.trigger_update(self.param_particle_rad(inds), np.zeros(len(inds))) self.pos = np.delete(self.pos, inds, axis=0) self.rad = np.delete(self.rad, inds, axis=0) # update the parameters globally self.setup_variables() self.trigger_parameter_change() return np.array(pos).reshape(-1,3), np.array(rad).reshape(-1) def get_radii(self): return self.rad.copy() def exports(self): return (super(PlatonicSpheresCollection, self).exports() + [self.get_radii]) def _p2i(self, param): """ Parameter to indices, returns (coord, index). Therefore, for a pos : ('x', 100) rad : ('a', 100) zscale : ('zscale, None) """ g = param.split('-') if len(g) == 1: return 'zscale', None if len(g) == 3: return g[2], int(g[1]) def _update_type(self, params): """ Returns dozscale and particle list of update """ dozscale = False particles = [] for p in listify(params): typ, ind = self._p2i(p) particles.append(ind) dozscale = dozscale or typ == 'zscale' particles = set(particles) return dozscale, particles def _tile(self, n): """ Get the tile surrounding particle `n` """ zsc = np.array([1.0/self.zscale, 1, 1]) pos, rad = self.pos[n], self.rad[n] pos = self._trans(pos) return Tile(pos - zsc*rad, pos + zsc*rad).pad(self.support_pad) def update(self, params, values): """Calls an update, but clips radii to be > 0""" # radparams = self.param_radii() params = listify(params) values = listify(values) for i, p in enumerate(params): # if (p in radparams) & (values[i] < 0): if (p[-2:] == '-a') and (values[i] < 0): values[i] = 0.0 super(PlatonicSpheresCollection, self).update(params, values) def __str__(self): return "{} N={}, zscale={}".format(self.__class__.__name__, self.N, self.zscale) def __repr__(self): return self.__str__() def __getstate__(self): odict = self.__dict__.copy() 
cdd(odict, super(PlatonicSpheresCollection, self).nopickle()) cdd(odict, ['rvecs', 'particles', '_params']) return odict def __setstate__(self, idict): self.__dict__.update(idict) ##Compatibility patches... self.float_precision = self.__dict__.get('float_precision', np.float64) ##end compatibility patch self.setup_variables() if self.shape: self.initialize() #============================================================================= # Coverslip half plane class #============================================================================= class Slab(Component): def __init__(self, zpos=0, angles=(0,0), param_prefix='slab', shape=None, float_precision=np.float64, category='obj'): """ A half plane corresponding to a cover-slip. Parameters ---------- shape : tuple field shape over which to calculate zpos : float position of the center of the slab in pixels angles : tuple of float (2,), optional Euler-like Angles of rotation of the normal with respect to the z-axis, i.e. ``angles=(0., 0.)`` gives a slab with a normal along z. The first angle theta is the rotation about the x-axis; the second angle phi is the rotation about the y-axis. Default is (0,0). float_precision : numpy float datatype One of numpy.float16, numpy.float32, numpy.float64; precision for precomputed arrays. Default is np.float64; make it 16 or 32 to save memory. 
""" self.lbl_zpos = param_prefix+'-zpos' self.lbl_theta = param_prefix+'-theta' self.lbl_phi = param_prefix+'-phi' if float_precision not in (np.float64, np.float32, np.float16): raise ValueError('float_precision must be one of np.float64, ' + 'np.float32, np.float16') self.float_precision = float_precision params = [self.lbl_zpos, self.lbl_theta, self.lbl_phi] values = [float(i) for i in [zpos, angles[0], angles[1]]] super(Slab, self).__init__(params, values, ordered=False, category=category) if shape: inner = shape.copy() #same default as Platonic Sphere Collection self.set_shape(shape, inner) self.set_tile(self.shape) if self.shape: self.initialize() def rmatrix(self): """ Generate the composite rotation matrix that rotates the slab normal. The rotation is a rotation about the x-axis, followed by a rotation about the z-axis. """ t = self.param_dict[self.lbl_theta] r0 = np.array([ [np.cos(t), -np.sin(t), 0], [np.sin(t), np.cos(t), 0], [0, 0, 1]]) p = self.param_dict[self.lbl_phi] r1 = np.array([ [np.cos(p), 0, np.sin(p)], [0, 1, 0], [-np.sin(p), 0, np.cos(p)]]) return np.dot(r1, r0) def normal(self): return np.dot(self.rmatrix(), np.array([1,0,0])) def _setup(self): self.rvecs = self.shape.coords(form='broadcast') self.image = np.zeros(self.shape.shape, dtype=self.float_precision) def _draw_slab(self): # for the position at zpos, and the center in the x-y plane pos = np.array([ self.param_dict[self.lbl_zpos], self.shape.shape[1]//2, self.shape.shape[2]//2 ]) pos = pos + self.inner.l p = (np.sum([r*n for r, n in zip(self.rvecs, self.normal())]) - pos.dot(self.normal())) m1 = p < -4. m0 = p > 4. mp = ~(m1 | m0) self.image[m1] = 1. self.image[mp] = 1.0/(1.0 + np.exp(7*p[mp])) #FIXME why is this not an erf??? self.image[m0] = 0. 
def initialize(self): self._setup() self._draw_slab() def set_tile(self, tile): self.tile = tile def update(self, params, values): super(Slab, self).update(params, values) self._draw_slab() def get(self): return self.image[self.tile.slicer] def get_update_tile(self, params, values): return self.shape.copy() def __getstate__(self): odict = self.__dict__.copy() cdd(odict, super(Slab, self).nopickle()) cdd(odict, ['rvecs', 'image']) return odict def __setstate__(self, idict): self.__dict__.update(idict) ##Compatibility patches... self.float_precision = self.__dict__.get('float_precision', np.float64) ##end compatibility patch if self.shape: self.initialize() def __str__(self): return self.__repr__() def __repr__(self): return "{} <{}>".format( str(self.__class__.__name__), self.param_dict )
peri-source/peri
peri/comp/objs.py
Python
mit
32,566
[ "Gaussian" ]
500ed0ba99858f54fc73d6489d23791e404eff2cd67467e899a6b6b54a247c69
""" Certificate HTML webview. """ import logging from datetime import datetime from uuid import uuid4 import pytz import six from django.conf import settings from django.contrib.auth.decorators import login_required from django.http import Http404, HttpResponse from django.template import RequestContext from django.utils import translation from django.utils.encoding import smart_str from eventtracking import tracker from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from organizations import api as organizations_api from common.djangoapps.edxmako.shortcuts import render_to_response from common.djangoapps.edxmako.template import Template from common.djangoapps.student.models import LinkedInAddToProfileConfiguration from common.djangoapps.util.date_utils import strftime_localized from common.djangoapps.util.views import handle_500 from lms.djangoapps.badges.events.course_complete import get_completion_badge from lms.djangoapps.badges.utils import badges_enabled from lms.djangoapps.certificates.api import ( get_active_web_certificate, get_certificate_footer_context, get_certificate_header_context, get_certificate_template ) from lms.djangoapps.certificates.models import ( CertificateGenerationCourseSetting, CertificateHtmlViewConfiguration, CertificateSocialNetworks, CertificateStatuses, GeneratedCertificate ) from lms.djangoapps.certificates.permissions import PREVIEW_CERTIFICATES from lms.djangoapps.certificates.utils import emit_certificate_event, get_certificate_url from lms.djangoapps.courseware.courses import get_course_by_id from openedx.core.djangoapps.catalog.utils import get_course_run_details from openedx.core.djangoapps.certificates.api import certificates_viewable_for_course, display_date_for_certificate from openedx.core.djangoapps.lang_pref.api import get_closest_released_language from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from openedx.core.lib.courses import course_image_url 
log = logging.getLogger(__name__) _ = translation.ugettext INVALID_CERTIFICATE_TEMPLATE_PATH = 'certificates/invalid.html' def get_certificate_description(mode, certificate_type, platform_name): """ :return certificate_type_description on the basis of current mode """ certificate_type_description = None if mode == 'honor': # Translators: This text describes the 'Honor' course certificate type. certificate_type_description = _(u"An {cert_type} certificate signifies that a " u"learner has agreed to abide by the honor code established by " u"{platform_name} and has completed all of the required tasks for this course " u"under its guidelines.").format(cert_type=certificate_type, platform_name=platform_name) elif mode == 'verified': # Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of # verification offered by edX. This type of verification is useful for professional education/certifications certificate_type_description = _(u"A {cert_type} certificate signifies that a " u"learner has agreed to abide by the honor code established by " u"{platform_name} and has completed all of the required tasks for this course " u"under its guidelines. A {cert_type} certificate also indicates that the " u"identity of the learner has been checked and " u"is valid.").format(cert_type=certificate_type, platform_name=platform_name) elif mode == 'xseries': # Translators: This text describes the 'XSeries' course certificate type. 
An XSeries is a collection of # courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization certificate_type_description = _(u"An {cert_type} certificate demonstrates a high level of " u"achievement in a program of study, and includes verification of " u"the student's identity.").format(cert_type=certificate_type) return certificate_type_description def _update_certificate_context(context, course, user_certificate, platform_name): """ Build up the certificate web view context using the provided values (Helper method to keep the view clean) """ # Populate dynamic output values using the course/certificate data loaded above certificate_type = context.get('certificate_type') # Override the defaults with any mode-specific static values context['certificate_id_number'] = user_certificate.verify_uuid context['certificate_verify_url'] = u"{prefix}{uuid}{suffix}".format( prefix=context.get('certificate_verify_url_prefix'), uuid=user_certificate.verify_uuid, suffix=context.get('certificate_verify_url_suffix') ) # Translators: The format of the date includes the full name of the month date = display_date_for_certificate(course, user_certificate) context['certificate_date_issued'] = strftime_localized(date, settings.CERTIFICATE_DATE_FORMAT) # Translators: This text represents the verification of the certificate context['document_meta_description'] = _(u'This is a valid {platform_name} certificate for {user_name}, ' u'who participated in {partner_short_name} {course_number}').format( platform_name=platform_name, user_name=context['accomplishment_copy_name'], partner_short_name=context['organization_short_name'], course_number=context['course_number'] ) # Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar context['document_title'] = _(u"{partner_short_name} {course_number} Certificate | {platform_name}").format( partner_short_name=context['organization_short_name'], 
course_number=context['course_number'], platform_name=platform_name ) # Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate # screen. The text describes the accomplishment represented by the certificate information displayed to the user context['accomplishment_copy_description_full'] = _(u"successfully completed, received a passing grade, and was " u"awarded this {platform_name} {certificate_type} " u"Certificate of Completion in ").format( platform_name=platform_name, certificate_type=context.get("certificate_type")) certificate_type_description = get_certificate_description(user_certificate.mode, certificate_type, platform_name) if certificate_type_description: context['certificate_type_description'] = certificate_type_description # Translators: This text describes the purpose (and therefore, value) of a course certificate context['certificate_info_description'] = _(u"{platform_name} acknowledges achievements through " u"certificates, which are awarded for course activities " u"that {platform_name} students complete.").format( platform_name=platform_name, ) def _update_context_with_basic_info(context, course_id, platform_name, configuration): """ Updates context dictionary with basic info required before rendering simplest certificate templates. """ context['platform_name'] = platform_name context['course_id'] = course_id # Update the view context with the default ConfigurationModel settings context.update(configuration.get('default', {})) # Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content reserved = _("All rights reserved") context['copyright_text'] = u'&copy; {year} {platform_name}. 
{reserved}.'.format( year=datetime.now(pytz.timezone(settings.TIME_ZONE)).year, platform_name=platform_name, reserved=reserved ) # Translators: This text is bound to the HTML 'title' element of the page and appears # in the browser title bar when a requested certificate is not found or recognized context['document_title'] = _("Invalid Certificate") context['company_tos_urltext'] = _("Terms of Service & Honor Code") # Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information context['company_privacy_urltext'] = _("Privacy Policy") # Translators: This line appears as a byline to a header image and describes the purpose of the page context['logo_subtitle'] = _("Certificate Validation") # Translators: Accomplishments describe the awards/certifications obtained by students on this platform context['accomplishment_copy_about'] = _(u'About {platform_name} Accomplishments').format( platform_name=platform_name ) # Translators: This line appears on the page just before the generation date for the certificate context['certificate_date_issued_title'] = _("Issued On:") # Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate context['certificate_id_number_title'] = _('Certificate ID Number') context['certificate_info_title'] = _(u'About {platform_name} Certificates').format( platform_name=platform_name ) context['certificate_verify_title'] = _(u"How {platform_name} Validates Student Certificates").format( platform_name=platform_name ) # Translators: This text describes the validation mechanism for a certificate file (known as GPG security) context['certificate_verify_description'] = _(u'Certificates issued by {platform_name} are signed by a gpg key so ' u'that they can be validated independently by anyone with the ' u'{platform_name} public key. 
For independent verification, ' u'{platform_name} uses what is called a ' u'"detached signature"&quot;".').format(platform_name=platform_name) context['certificate_verify_urltext'] = _("Validate this certificate for yourself") # Translators: This text describes (at a high level) the mission and charter the edX platform and organization context['company_about_description'] = _(u"{platform_name} offers interactive online classes and MOOCs.").format( platform_name=platform_name) context['company_about_title'] = _(u"About {platform_name}").format(platform_name=platform_name) context['company_about_urltext'] = _(u"Learn more about {platform_name}").format(platform_name=platform_name) context['company_courselist_urltext'] = _(u"Learn with {platform_name}").format(platform_name=platform_name) context['company_careers_urltext'] = _(u"Work at {platform_name}").format(platform_name=platform_name) context['company_contact_urltext'] = _(u"Contact {platform_name}").format(platform_name=platform_name) # Translators: This text appears near the top of the certificate and describes the guarantee provided by edX context['document_banner'] = _(u"{platform_name} acknowledges the following student accomplishment").format( platform_name=platform_name ) def _update_course_context(request, context, course, platform_name): """ Updates context dictionary with course info. 
""" context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course)) course_title_from_cert = context['certificate_data'].get('course_title', '') accomplishment_copy_course_name = course_title_from_cert if course_title_from_cert else course.display_name context['accomplishment_copy_course_name'] = accomplishment_copy_course_name course_number = course.display_coursenumber if course.display_coursenumber else course.number context['course_number'] = course_number if context['organization_long_name']: # Translators: This text represents the description of course context['accomplishment_copy_course_description'] = _(u'a course of study offered by {partner_short_name}, ' 'an online learning initiative of ' '{partner_long_name}.').format( partner_short_name=context['organization_short_name'], partner_long_name=context['organization_long_name'], platform_name=platform_name) else: # Translators: This text represents the description of course context['accomplishment_copy_course_description'] = _('a course of study offered by ' '{partner_short_name}.').format( partner_short_name=context['organization_short_name'], platform_name=platform_name) def _update_social_context(request, context, course, user_certificate, platform_name): """ Updates context dictionary with info required for social sharing. 
""" share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS) context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False) context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID) context['facebook_share_text'] = share_settings.get( 'CERTIFICATE_FACEBOOK_TEXT', _(u"I completed the {course_title} course on {platform_name}.").format( course_title=context['accomplishment_copy_course_name'], platform_name=platform_name ) ) context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False) context['twitter_share_text'] = share_settings.get( 'CERTIFICATE_TWITTER_TEXT', _(u"I completed a course at {platform_name}. Take a look at my certificate.").format( platform_name=platform_name ) ) share_url = request.build_absolute_uri(get_certificate_url(course_id=course.id, uuid=user_certificate.verify_uuid)) context['share_url'] = share_url twitter_url = '' if context.get('twitter_share_enabled', False): twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format( twitter_share_text=smart_str(context['twitter_share_text']), share_url=six.moves.urllib.parse.quote_plus(smart_str(share_url)) ) context['twitter_url'] = twitter_url context['linked_in_url'] = None # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. linkedin_config = LinkedInAddToProfileConfiguration.current() if linkedin_config.is_enabled(): context['linked_in_url'] = linkedin_config.add_to_profile_url( course.display_name, user_certificate.mode, smart_str(share_url), certificate=user_certificate ) def _update_context_with_user_info(context, user, user_certificate): """ Updates context dictionary with user related info. 
""" user_fullname = user.profile.name context['username'] = user.username context['course_mode'] = user_certificate.mode context['accomplishment_user_id'] = user.id context['accomplishment_copy_name'] = user_fullname context['accomplishment_copy_username'] = user.username context['accomplishment_more_title'] = _(u"More Information About {user_name}'s Certificate:").format( user_name=user_fullname ) # Translators: This line is displayed to a user who has completed a course and achieved a certification context['accomplishment_banner_opening'] = _(u"{fullname}, you earned a certificate!").format( fullname=user_fullname ) # Translators: This line congratulates the user and instructs them to share their accomplishment on social networks context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what " "you accomplished. Show it off to family, friends, and colleagues " "in your social and professional networks.") # Translators: This line leads the reader to understand more about the certificate that a student has been awarded context['accomplishment_copy_more_about'] = _(u"More about {fullname}'s accomplishment").format( fullname=user_fullname ) def _get_user_certificate(request, user, course_key, course, preview_mode=None): """ Retrieves user's certificate from db. Creates one in case of preview mode. Returns None if there is no certificate generated for given user otherwise returns `GeneratedCertificate` instance. 
""" user_certificate = None if preview_mode: # certificate is being previewed from studio if request.user.has_perm(PREVIEW_CERTIFICATES, course): if course.certificate_available_date and not course.self_paced: modified_date = course.certificate_available_date else: modified_date = datetime.now().date() user_certificate = GeneratedCertificate( mode=preview_mode, verify_uuid=six.text_type(uuid4().hex), modified_date=modified_date, created_date=datetime.now().date(), ) elif certificates_viewable_for_course(course): # certificate is being viewed by learner or public try: user_certificate = GeneratedCertificate.eligible_certificates.get( user=user, course_id=course_key, status=CertificateStatuses.downloadable ) except GeneratedCertificate.DoesNotExist: pass return user_certificate def _track_certificate_events(request, course, user, user_certificate): """ Tracks web certificate view related events. """ # Badge Request Event Tracking Logic course_key = course.location.course_key if 'evidence_visit' in request.GET: badge_class = get_completion_badge(course_key, user) if not badge_class: log.warning(u'Visit to evidence URL for badge, but badges not configured for course "%s"', course_key) badges = [] else: badges = badge_class.get_for_user(user) if badges: # There should only ever be one of these. 
badge = badges[0] tracker.emit( 'edx.badge.assertion.evidence_visited', { 'badge_name': badge.badge_class.display_name, 'badge_slug': badge.badge_class.slug, 'badge_generator': badge.backend, 'issuing_component': badge.badge_class.issuing_component, 'user_id': user.id, 'course_id': six.text_type(course_key), 'enrollment_mode': badge.badge_class.mode, 'assertion_id': badge.id, 'assertion_image_url': badge.image_url, 'assertion_json_url': badge.assertion_url, 'issuer': badge.data.get('issuer'), } ) else: log.warning( u"Could not find badge for %s on course %s.", user.id, course_key, ) # track certificate evidence_visited event for analytics when certificate_user and accessing_user are different if request.user and request.user.id != user.id: emit_certificate_event('evidence_visited', user, six.text_type(course.id), course, { 'certificate_id': user_certificate.verify_uuid, 'enrollment_mode': user_certificate.mode, 'social_network': CertificateSocialNetworks.linkedin }) def _update_badge_context(context, course, user): """ Updates context with badge info. """ badge = None if badges_enabled() and course.issue_badges: badges = get_completion_badge(course.location.course_key, user).get_for_user(user) if badges: badge = badges[0] context['badge'] = badge def _update_organization_context(context, course): """ Updates context with organization related info. """ partner_long_name, organization_logo = None, None partner_short_name = course.display_organization if course.display_organization else course.org organizations = organizations_api.get_course_organizations(course_key=course.id) if organizations: # TODO Need to add support for multiple organizations, Currently we are interested in the first one. 
organization = organizations[0] partner_long_name = organization.get('name', partner_long_name) partner_short_name = organization.get('short_name', partner_short_name) organization_logo = organization.get('logo', None) context['organization_long_name'] = partner_long_name context['organization_short_name'] = partner_short_name context['accomplishment_copy_course_org'] = partner_short_name context['organization_logo'] = organization_logo def unsupported_url(request, user_id, course_id): # pylint: disable=unused-argument """ This view returns the un-supported url page aimed to let the user aware that url is no longer supported """ platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME) configuration = CertificateHtmlViewConfiguration.get_config() return _render_invalid_certificate( request, course_id, platform_name, configuration, cert_path='certificates/url_unsupported.html' ) @login_required def render_preview_certificate(request, course_id): """ This view renders the course certificate in preview mode """ return render_html_view(request, six.text_type(course_id)) def render_cert_by_uuid(request, certificate_uuid): """ This public view generates an HTML representation of the specified certificate """ try: certificate = GeneratedCertificate.eligible_certificates.get( verify_uuid=certificate_uuid, status=CertificateStatuses.downloadable ) return render_html_view(request, six.text_type(certificate.course_id), certificate) except GeneratedCertificate.DoesNotExist as e: raise Http404 from e @handle_500( template_path="certificates/server-error.html", test_func=lambda request: request.GET.get('preview', None) ) def render_html_view(request, course_id, certificate=None): """ This public view generates an HTML representation of the specified user and course If a certificate is not available, we display a "Sorry!" 
screen instead """ user = certificate.user if certificate else request.user user_id = user.id preview_mode = request.GET.get('preview', None) platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME) configuration = CertificateHtmlViewConfiguration.get_config() # Kick the user back to the "Invalid" screen if the feature is disabled globally if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False): return _render_invalid_certificate(request, course_id, platform_name, configuration) # Load the course and user objects try: course_key = CourseKey.from_string(course_id) course = get_course_by_id(course_key) # For any course or user exceptions, kick the user back to the "Invalid" screen except (InvalidKeyError, Http404) as exception: error_str = ( u"Invalid cert: error finding course %s " u"Specific error: %s" ) log.info(error_str, course_id, str(exception)) return _render_invalid_certificate(request, course_id, platform_name, configuration) # Kick the user back to the "Invalid" screen if the feature is disabled for the course if not course.cert_html_view_enabled: log.info( u"Invalid cert: HTML certificates disabled for %s. 
User id: %d", course_id, user_id, ) return _render_invalid_certificate(request, course_id, platform_name, configuration) # Load user's certificate user_certificate = _get_user_certificate(request, user, course_key, course, preview_mode) if not user_certificate: log.info( u"Invalid cert: User %d does not have eligible cert for %s.", user_id, course_id, ) return _render_invalid_certificate(request, course_id, platform_name, configuration) # Get the active certificate configuration for this course # If we do not have an active certificate, we'll need to send the user to the "Invalid" screen # Passing in the 'preview' parameter, if specified, will return a configuration, if defined active_configuration = get_active_web_certificate(course, preview_mode) if active_configuration is None: log.info( u"Invalid cert: course %s does not have an active configuration. User id: %d", course_id, user_id, ) return _render_invalid_certificate(request, course_id, platform_name, configuration) # Get data from Discovery service that will be necessary for rendering this Certificate. catalog_data = _get_catalog_data_for_course(course_key) # Determine whether to use the standard or custom template to render the certificate. custom_template = None custom_template_language = None if settings.FEATURES.get('CUSTOM_CERTIFICATE_TEMPLATES_ENABLED', False): log.info(u"Custom certificate for course %s", course_id) custom_template, custom_template_language = _get_custom_template_and_language( course.id, user_certificate.mode, catalog_data.pop('content_language', None) ) # Determine the language that should be used to render the certificate. # For the standard certificate template, use the user language. For custom templates, use # the language associated with the template. 
user_language = translation.get_language() certificate_language = custom_template_language if custom_template else user_language log.info( u"certificate language is: %s for the course: %s", certificate_language, course_key ) # Generate the certificate context in the correct language, then render the template. with translation.override(certificate_language): context = {'user_language': user_language} _update_context_with_basic_info(context, course_id, platform_name, configuration) context['certificate_data'] = active_configuration # Append/Override the existing view context values with any mode-specific ConfigurationModel values context.update(configuration.get(user_certificate.mode, {})) # Append organization info _update_organization_context(context, course) # Append course info _update_course_context(request, context, course, platform_name) # Append course run info from discovery context.update(catalog_data) # Append user info _update_context_with_user_info(context, user, user_certificate) # Append social sharing info _update_social_context(request, context, course, user_certificate, platform_name) # Append/Override the existing view context values with certificate specific values _update_certificate_context(context, course, user_certificate, platform_name) # Append badge info _update_badge_context(context, course, user) # Add certificate header/footer data to current context context.update(get_certificate_header_context(is_secure=request.is_secure())) context.update(get_certificate_footer_context()) # Append/Override the existing view context values with any course-specific static values from Advanced Settings context.update(course.cert_html_view_overrides) # Track certificate view events _track_certificate_events(request, course, user, user_certificate) # Render the certificate return _render_valid_certificate(request, context, custom_template) def _get_catalog_data_for_course(course_key): """ Retrieve data from the Discovery service necessary for rendering a 
certificate for a specific course. """ course_certificate_settings = CertificateGenerationCourseSetting.get(course_key) if not course_certificate_settings: return {} catalog_data = {} course_run_fields = [] if course_certificate_settings.language_specific_templates_enabled: course_run_fields.append('content_language') if course_certificate_settings.include_hours_of_effort: course_run_fields.extend(['weeks_to_complete', 'max_effort']) if course_run_fields: course_run_data = get_course_run_details(course_key, course_run_fields) if course_run_data.get('weeks_to_complete') and course_run_data.get('max_effort'): try: weeks_to_complete = int(course_run_data['weeks_to_complete']) max_effort = int(course_run_data['max_effort']) catalog_data['hours_of_effort'] = weeks_to_complete * max_effort except ValueError: log.exception('Error occurred while parsing course run details') catalog_data['content_language'] = course_run_data.get('content_language') log.info( u"catalog data received for course: %s is : %s", course_key, catalog_data, ) return catalog_data def _get_custom_template_and_language(course_id, course_mode, course_language): """ Return the custom certificate template, if any, that should be rendered for the provided course/mode/language combination, along with the language that should be used to render that template. 
""" closest_released_language = get_closest_released_language(course_language) if course_language else None log.info( u"closest released language for %s is %s and course language was: %s", course_id, closest_released_language, course_language ) template = get_certificate_template(course_id, course_mode, closest_released_language) if template and template.language: return (template, closest_released_language) elif template: user_language = translation.get_language() return (template, user_language) else: return (None, None) def _render_invalid_certificate(request, course_id, platform_name, configuration, cert_path=INVALID_CERTIFICATE_TEMPLATE_PATH): """ Renders the invalid certificate view with default header and footer. """ context = {} _update_context_with_basic_info(context, course_id, platform_name, configuration) # Add certificate header/footer data to current context context.update(get_certificate_header_context(is_secure=request.is_secure())) context.update(get_certificate_footer_context()) return render_to_response(cert_path, context) def _render_valid_certificate(request, context, custom_template=None): """ Renders certificate """ if custom_template: template = Template( custom_template.template, output_encoding='utf-8', input_encoding='utf-8', default_filters=['decode.utf8'], encoding_errors='replace', ) context = RequestContext(request, context) return HttpResponse(template.render(context)) else: return render_to_response("certificates/valid.html", context)
stvstnfrd/edx-platform
lms/djangoapps/certificates/views/webview.py
Python
agpl-3.0
32,627
[ "VisIt" ]
3113dda81e8c1e485a135d47a4a5edf518b9223bbbc7107f233725e504d716e9
# -*- coding: utf-8 -*- # Copyright (c) 2015-2022, Exa Analytics Development Team # Distributed under the terms of the Apache License 2.0 """ A unified data anlaysis and visualization platform for computational and theoretical chemists, physicists, etc. Support for molecular geometry and orbital visualization is provided via the `Jupyter`_ notebook, a web-browser based interactive (multi-programming language) environment. .. extended description (todo) Warning: This package uses the `atomic`_ unit system (Hartree) by default. .. _atomic: https://en.wikipedia.org/wiki/Atomic_units Supported Software --------------------- The list below contains third-party software that is supported by this package. For specific features supported (per software), see the appropriate description below. - :mod:`~exatomic.adf.__init__`: `Amsterdam Density Functional`_ - :mod:`~exatomic.gaussian.__init__`: `Gaussian`_ - :mod:`~exatomic.molcas.__init__`: `OpenMolcas`_ - :mod:`~exatomic.nbo.__init__`: `NBO`_ - :mod:`~exatomic.nwchem.__init__`: `NWChem`_ - :mod:`~exatomic.qe.__init__`: `Quantum ESPRESSO`_ - :mod:`~exatomic.interfaces.__init__`: Additional 3rd party support .. _Jupyter: https://jupyter.org .. _Amsterdam Density Functional: https://www.scm.com .. _Gaussian: http://gaussian.com/ .. _OpenMolcas: https://gitlab.com/Molcas/OpenMolcas .. _NBO: http://nbo6.chem.wisc.edu/ .. _NWChem: http://www.nwchem-sw.org/index.php/Main_Page .. 
_Quantum ESPRESSO: http://www.quantum-espresso.org/ """ from __future__ import absolute_import __js_version__ = "^0.5.0" def _jupyter_nbextension_paths(): """Jupyter notebook extension directory paths.""" return [{ 'section': "notebook", 'src': "static/js", 'dest': "exatomic", 'require': "exatomic/extension" }] import os import tempfile import logging.config import yaml with open(os.path.join(os.path.dirname(__file__), 'conf', 'logging.yml'), 'r') as f: _log = yaml.safe_load(f.read()) _log['handlers']['file']['filename'] = os.path.join(tempfile.gettempdir(), 'exa.log') logging.config.dictConfig(_log) def func_log(func): name = '.'.join([func.__module__, func.__name__]) return logging.getLogger(name) from ._version import __version__ from . import core from .core import Universe, Editor, Atom, AtomicField, Frame, Tensor, add_tensor from .interfaces import XYZ, Cube from .widgets import DemoContainer, DemoUniverse, UniverseWidget, TensorContainer
exa-analytics/exatomic
exatomic/__init__.py
Python
apache-2.0
2,551
[ "ADF", "ESPResSo", "Gaussian", "MOLCAS", "NWChem", "OpenMolcas", "Quantum ESPRESSO" ]
84eec5ff50fd3e6aeda6bbe18a5dc8ae39dae0959591178246fefd5af1eb360b
""" General Numerical Solver for the 1D Time-Dependent Schrodinger's equation. author: Jake Vanderplas email: vanderplas@astro.washington.edu website: http://jakevdp.github.com license: BSD Please feel free to use and modify this, but keep the above information. Thanks! """ import numpy as np from matplotlib import pyplot as pl from matplotlib import animation from scipy.fftpack import fft,ifft class Schrodinger(object): """ Class which implements a numerical solution of the time-dependent Schrodinger equation for an arbitrary potential """ def __init__(self, x, psi_x0, V_x, k0 = None, hbar=1, m=1, t0=0.0): """ Parameters ---------- x : array_like, float length-N array of evenly spaced spatial coordinates psi_x0 : array_like, complex length-N array of the initial wave function at time t0 V_x : array_like, float length-N array giving the potential at each x k0 : float the minimum value of k. Note that, because of the workings of the fast fourier transform, the momentum wave-number will be defined in the range k0 < k < 2*pi / dx where dx = x[1]-x[0]. If you expect nonzero momentum outside this range, you must modify the inputs accordingly. 
        If not specified, k0 will be calculated such that the range is [-k0,k0]
        hbar : float
            value of planck's constant (default = 1)
        m : float
            particle mass (default = 1)
        t0 : float
            initial time (default = 0)
        """
        # Validation of array inputs
        self.x, psi_x0, self.V_x = map(np.asarray, (x, psi_x0, V_x))
        N = self.x.size
        assert self.x.shape == (N,)
        assert psi_x0.shape == (N,)
        assert self.V_x.shape == (N,)

        # Set internal parameters
        self.hbar = hbar
        self.m = m
        self.t = t0
        self.dt_ = None
        self.N = len(x)
        self.dx = self.x[1] - self.x[0]
        # momentum-space grid spacing implied by an N-point FFT grid
        self.dk = 2 * np.pi / (self.N * self.dx)

        # set momentum scale
        # NOTE(review): `k0 == None` works but `k0 is None` is the idiomatic test
        if k0 == None:
            self.k0 = -0.5 * self.N * self.dk
        else:
            self.k0 = k0
        self.k = self.k0 + self.dk * np.arange(self.N)

        self.psi_x = psi_x0
        self.compute_k_from_x()

        # variables which hold steps in evolution of the wavefunction;
        # they are (re)computed lazily by the `dt` property setter below
        self.x_evolve_half = None
        self.x_evolve = None
        self.k_evolve = None

        # attributes used for dynamic plotting
        self.psi_x_line = None
        self.psi_k_line = None
        self.V_x_line = None

    def _set_psi_x(self, psi_x):
        # store psi(x) in "modulated" form so that its FFT directly yields
        # psi(k) on the shifted momentum grid that starts at k[0]
        self.psi_mod_x = (psi_x * np.exp(-1j * self.k[0] * self.x)
                          * self.dx / np.sqrt(2 * np.pi))

    def _get_psi_x(self):
        # invert the modulation applied in _set_psi_x
        return (self.psi_mod_x * np.exp(1j * self.k[0] * self.x)
                * np.sqrt(2 * np.pi) / self.dx)

    def _set_psi_k(self, psi_k):
        # phase factor accounts for the spatial grid starting at x[0], not 0
        self.psi_mod_k = psi_k * np.exp(1j * self.x[0]
                                        * self.dk * np.arange(self.N))

    def _get_psi_k(self):
        return self.psi_mod_k * np.exp(-1j * self.x[0]
                                       * self.dk * np.arange(self.N))

    def _get_dt(self):
        return self.dt_

    def _set_dt(self, dt):
        # precompute the split-operator propagators for this time step;
        # only recompute when dt actually changes
        if dt != self.dt_:
            self.dt_ = dt
            # half-step propagator for the potential term: exp(-i V dt / 2 hbar)
            self.x_evolve_half = np.exp(-0.5 * 1j * self.V_x
                                        / self.hbar * dt)
            self.x_evolve = self.x_evolve_half * self.x_evolve_half
            # full-step kinetic propagator applied in k-space
            self.k_evolve = np.exp(-0.5 * 1j * self.hbar /
                                   self.m * (self.k * self.k) * dt)

    psi_x = property(_get_psi_x, _set_psi_x)
    psi_k = property(_get_psi_k, _set_psi_k)
    dt = property(_get_dt, _set_dt)

    def compute_k_from_x(self):
        # forward FFT of the modulated wavefunction: x-space -> k-space
        self.psi_mod_k = fft(self.psi_mod_x)

    def compute_x_from_k(self):
        # inverse FFT: k-space -> x-space
        self.psi_mod_x = ifft(self.psi_mod_k)

    def time_step(self, dt, Nsteps = 1):
        """
        Perform a series of time-steps via the time-dependent
        Schrodinger Equation.

        Parameters
        ----------
        dt : float
            the small time interval over which to integrate
        Nsteps : float, optional
            the number of intervals to compute.  The total change
            in time at the end of this method will be dt * Nsteps.
            default is N = 1
        """
        self.dt = dt

        if Nsteps > 0:
            self.psi_mod_x *= self.x_evolve_half

        # split-operator scheme: the half potential step above, then
        # (kinetic, full potential) pairs, closed by a final half step below
        for i in xrange(Nsteps - 1):   # NOTE(review): xrange is Python 2 only
            self.compute_k_from_x()
            self.psi_mod_k *= self.k_evolve
            self.compute_x_from_k()
            self.psi_mod_x *= self.x_evolve

        self.compute_k_from_x()
        self.psi_mod_k *= self.k_evolve

        self.compute_x_from_k()
        self.psi_mod_x *= self.x_evolve_half

        self.compute_k_from_x()

        self.t += dt * Nsteps


######################################################################
# Helper functions for gaussian wave-packets

def gauss_x(x, a, x0, k0):
    """
    a gaussian wave packet of width a, centered at x0, with momentum k0
    """
    return ((a * np.sqrt(np.pi)) ** (-0.5)
            * np.exp(-0.5 * ((x - x0) * 1. / a) ** 2 + 1j * x * k0))


def gauss_k(k,a,x0,k0):
    """
    analytical fourier transform of gauss_x(x), above
    """
    return ((a / np.sqrt(np.pi))**0.5
            * np.exp(-0.5 * (a * (k - k0)) ** 2 - 1j * (k - k0) * x0))


######################################################################
# Utility functions for running the animation

def theta(x):
    """
    theta function :
      returns 0 if x<=0, and 1 if x>0
    """
    x = np.asarray(x)
    y = np.zeros(x.shape)
    y[x > 0] = 1.0
    return y


def square_barrier(x, width, height):
    # rectangular barrier of the given width/height with left edge at x = 0
    return height * (theta(x) - theta(x - width))


######################################################################
# Create the animation

# specify time steps and duration
dt = 0.01
N_steps = 50
t_max = 120
frames = int(t_max / float(N_steps * dt))

# specify constants
hbar = 1.0   # planck's constant
m = 1.9      # particle mass

# specify range in x coordinate
N = 2 ** 11
dx = 0.1
x = dx * (np.arange(N) - 0.5 * N)

# specify potential
V0 = 1.5
L = hbar / np.sqrt(2 * m * V0)
a = 3 * L
x0 = -60 * L
V_x = square_barrier(x, a, V0)
# hard walls at the grid edges keep the packet confined to the box
V_x[x < -98] = 1E6
V_x[x > 98] = 1E6

# specify initial momentum and quantities derived from it
p0 = np.sqrt(2 * m * 0.2 * V0)
dp2 = p0 * p0 * 1./80
d = hbar / np.sqrt(2 * dp2)

k0 = p0 / hbar
v0 = p0 / m
psi_x0 = gauss_x(x, d, x0, k0)

# define the Schrodinger object which performs the calculations
S = Schrodinger(x=x,
                psi_x0=psi_x0,
                V_x=V_x,
                hbar=hbar,
                m=m,
                k0=-28)

######################################################################
# Set up plot
fig = pl.figure()

# plotting limits
xlim = (-100, 100)
klim = (-5, 5)

# top axes show the x-space data
ymin = 0
ymax = V0
ax1 = fig.add_subplot(211, xlim=xlim,
                      ylim=(ymin - 0.2 * (ymax - ymin),
                            ymax + 0.2 * (ymax - ymin)))
psi_x_line, = ax1.plot([], [], c='r', label=r'$|\psi(x)|$')
V_x_line, = ax1.plot([], [], c='k', label=r'$V(x)$')
center_line = ax1.axvline(0, c='k', ls=':',
                          label = r"$x_0 + v_0t$")

title = ax1.set_title("")
ax1.legend(prop=dict(size=12))
ax1.set_xlabel('$x$')
ax1.set_ylabel(r'$|\psi(x)|$')

# bottom axes show the k-space data
ymin = abs(S.psi_k).min()
ymax = abs(S.psi_k).max()
ax2 = fig.add_subplot(212, xlim=klim,
                      ylim=(ymin - 0.2 * (ymax - ymin),
                            ymax + 0.2 * (ymax - ymin)))
psi_k_line, = ax2.plot([], [], c='r', label=r'$|\psi(k)|$')

p0_line1 = ax2.axvline(-p0 / hbar, c='k', ls=':', label=r'$\pm p_0$')
p0_line2 = ax2.axvline(p0 / hbar, c='k', ls=':')
# NOTE(review): the label reads sqrt(2 m V_0) but m is omitted from the
# expression below -- confirm which is intended
mV_line = ax2.axvline(np.sqrt(2 * V0) / hbar, c='k', ls='--',
                      label=r'$\sqrt{2mV_0}$')
ax2.legend(prop=dict(size=12))
ax2.set_xlabel('$k$')
ax2.set_ylabel(r'$|\psi(k)|$')

V_x_line.set_data(S.x, S.V_x)


######################################################################
# Animate plot
def init():
    # clear all artists for blitting
    psi_x_line.set_data([], [])
    V_x_line.set_data([], [])
    center_line.set_data([], [])

    psi_k_line.set_data([], [])
    title.set_text("")
    return (psi_x_line, V_x_line, center_line, psi_k_line, title)


def animate(i):
    # advance the wavefunction by N_steps increments of dt, then redraw
    S.time_step(dt, N_steps)
    psi_x_line.set_data(S.x, 4 * abs(S.psi_x))
    V_x_line.set_data(S.x, S.V_x)
    # marker at the classical free-particle position x_0 + v_0 t
    center_line.set_data(2 * [x0 + S.t * p0 / m], [0, 1])

    psi_k_line.set_data(S.k, abs(S.psi_k))
    title.set_text("t = %.2f" % S.t)
    return (psi_x_line, V_x_line, center_line, psi_k_line, title)


# call the animator.  blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=frames, interval=30, blit=True)

# uncomment the following line to save the video in mp4 format.  This
# requires either mencoder or ffmpeg to be installed on your system
#anim.save('schrodinger_barrier.mp4', fps=15, extra_args=['-vcodec', 'libx264'])

pl.show()
ingelectronicadj/FisicaConPython
FisicaCuantica/Soluciones de la ecuacion de Schrodinger/schrodinger.py
Python
gpl-3.0
9,290
[ "Gaussian" ]
62936ef26a28cb0f738613c72fa1d62330c2d6599bc50c9b35d6adf21ad0c88d
#########################################
# Polly.py
# categories: speech
# more info @: http://myrobotlab.org/service/Polly
#########################################
# start the service
polly = Runtime.start('polly','Polly')

# credentials for the TTS backend (presumably AWS access keys -- see the
# service page linked above; TODO confirm)
polly.setKey("YOUR_KEY_ID","YOUR_KEY_SECRET")

# demo two voices: select language and voice, then speak synchronously
for language, voice, sentence in (
        ("en", u"Brian", u"Hello this is Brian speakin !"),
        ("fr", u"Céline", u"Ceci est une voix française en U T F 8"),
):
    polly.setLanguage(language)
    polly.setVoice(voice)
    polly.speakBlocking(sentence)
MyRobotLab/pyrobotlab
service/Polly.py
Python
apache-2.0
491
[ "Brian" ]
8b4a8139b35d5d05b1d2cb805e2613dc45889a99a2be3919dc316d8e7b3fb496