Dataset schema (column, dtype, range):

  text        string   length 12 to 1.05M
  repo_name   string   length 5 to 86
  path        string   length 4 to 191
  language    string   1 distinct value
  license     string   15 distinct values
  size        int32    12 to 1.05M
  keyword     list     length 1 to 23
  text_hash   string   length 64 (fixed)
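The schema above describes one record per source file. As a consumption sketch — assuming the dump is serialized as JSON Lines with these field names (the on-disk format is not stated here) and using a hypothetical file name "train.jsonl" — rows can be filtered with only the standard library:

import json

def iter_rows(path):
    # Each line is assumed to hold one JSON object with the schema fields
    # (text, repo_name, path, language, license, size, keyword, text_hash).
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

# Hypothetical file name; collect MIT-licensed rows tagged "Galaxy".
matches = [row for row in iter_rows("train.jsonl")
           if row["license"] == "mit" and "Galaxy" in row["keyword"]]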
from wandbox import __bash__ as bash
from wandbox import __cc__ as cc
from wandbox import __cmake__ as cmake
from wandbox import __coffee__ as coffee
from wandbox import __cpp__ as cpp
from wandbox import __crystal__ as crystal
from wandbox import __csharp__ as cs
from wandbox import __cxx__ as cxx
from wandbox import __dmd__ as dmd
from wandbox import __elixir__ as elixir
from wandbox import __erlang__ as erlang
from wandbox import __fsharp__ as fs
from wandbox import __ghc__ as ghc
from wandbox import __go__ as go
from wandbox import __groovy__ as groovy
from wandbox import __java__ as java
from wandbox import __js__ as js
from wandbox import __julia__ as julia
from wandbox import __lazyk__ as lazyk
from wandbox import __lisp__ as lisp
from wandbox import __lua__ as lua
from wandbox import __nim__ as nim
from wandbox import __ocaml__ as ocaml
from wandbox import __openssl__ as openssl
from wandbox import __pascal__ as pascal
from wandbox import __perl__ as perl
from wandbox import __php__ as php
from wandbox import __pony__ as pony
from wandbox import __python__ as python
from wandbox import __r__ as rscript
from wandbox import __ruby__ as ruby
from wandbox import __rust__ as rust
from wandbox import __scala__ as scala
from wandbox import __sql__ as sql
from wandbox import __swift__ as swift
from wandbox import __tsc__ as tsc
from wandbox import __vim__ as vim


def get_all_cli():
    clis = [
        bash.BashCLI.InnerCLI(),
        cc.CcCLI(),
        cmake.CMakeCLI(),
        coffee.CoffeeCLI(),
        cpp.CppCLI(),
        crystal.CrystalCLI(),
        cs.CsCLI(),
        cxx.CxxCLI(),
        dmd.DCLI(),
        elixir.ElixirCLI(),
        erlang.ErlangCLI(),
        fs.FsCLI(),
        ghc.GhcCLI(),
        go.GoCLI(),
        groovy.GroovyCLI(),
        java.JavaCLI(),
        js.JsCLI(),
        julia.JuliaCLI(),
        lazyk.LazyKCLI(),
        lisp.LispCLI(),
        lua.LuaCLI(),
        nim.NimCLI(),
        ocaml.OCamlCLI(),
        openssl.OpenSSLCLI.InnerCLI(),
        pascal.PascalCLI(),
        perl.PerlCLI(),
        php.PhpCLI(),
        pony.PonyCLI(),
        python.PythonCLI(),
        rscript.RscriptCLI(),
        ruby.RubyCLI(),
        rust.RustCLI(),
        scala.ScalaCLI(),
        sql.SqlCLI(),
        swift.SwiftCLI(),
        tsc.TscCLI(),
        vim.VimCLI(),
    ]
    return clis
repo_name: srz-zumix/wandbox-api
path: wandbox/__all__.py
language: Python
license: mit
size: 2,358
keyword: [ "CRYSTAL" ]
text_hash: 3e178df115d70f2563eae885baf0288007c4a492d7e4ba9c80a2b608a2246f22
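text_hash is 64 hexadecimal characters, the width of a SHA-256 digest; that it is SHA-256 over the UTF-8 bytes of the text field is an inference from that width, not something stated in the dump. Under that assumption, a decoded row can be integrity-checked like this:

import hashlib

def sha256_hex(text):
    # 64 hex characters is exactly the width of a SHA-256 digest.
    return hashlib.sha256(text.encode("utf-8")).hexdigest()

def verify_row(row):
    # row: one decoded record carrying "text" and "text_hash" fields.
    return sha256_hex(row["text"]) == row["text_hash"]

# Illustrative only -- a real row carries the full file contents:
print(sha256_hex("from wandbox import __bash__ as bash"))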
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module is intended to be used to compute Pourbaix diagrams of arbitrary compositions and formation energies. If you use this module in your work, please consider citing the following: General formalism for solid-aqueous equilibria from DFT: Persson et al., DOI: 10.1103/PhysRevB.85.235438 Decomposition maps, or Pourbaix hull diagrams Singh et al., DOI: 10.1021/acs.chemmater.7b03980 Fast computation of many-element Pourbaix diagrams: Patel et al., https://arxiv.org/abs/1909.00035 (submitted) """ import logging import numpy as np import itertools import re from copy import deepcopy from functools import cmp_to_key, partial, lru_cache from monty.json import MSONable, MontyDecoder from multiprocessing import Pool import warnings from scipy.spatial import ConvexHull, HalfspaceIntersection try: from scipy.special import comb except ImportError: from scipy.misc import comb from pymatgen.util.coord import Simplex from pymatgen.util.string import latexify from pymatgen.util.plotting import pretty_plot from pymatgen.core.periodic_table import Element from pymatgen.core.composition import Composition from pymatgen.core.ion import Ion from pymatgen.entries.computed_entries import ComputedEntry from pymatgen.analysis.reaction_calculator import Reaction, ReactionError from pymatgen.analysis.phase_diagram import PhaseDiagram, PDEntry from tqdm import tqdm __author__ = "Sai Jayaraman" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.4" __maintainer__ = "Joseph Montoya" __credits__ = "Arunima Singh, Joseph Montoya, Anjli Patel" __email__ = "joseph.montoya@tri.global" __status__ = "Production" __date__ = "Nov 1, 2012" logger = logging.getLogger(__name__) MU_H2O = -2.4583 PREFAC = 0.0591 # TODO: Revise to more closely reflect PDEntry, invoke from energy/composition # TODO: PourbaixEntries depend implicitly on having entry energies be # formation energies, should be a better way to get from raw energies # TODO: uncorrected_energy is a bit of a misnomer, but not sure what to rename class PourbaixEntry(MSONable): """ An object encompassing all data relevant to a solid or ion in a pourbaix diagram. Each bulk solid/ion has an energy g of the form: e = e0 + 0.0591 log10(conc) - nO mu_H2O + (nH - 2nO) pH + phi (-nH + 2nO + q) Note that the energies corresponding to the input entries should be formation energies with respect to hydrogen and oxygen gas in order for the pourbaix diagram formalism to work. This may be changed to be more flexible in the future. """ def __init__(self, entry, entry_id=None, concentration=1e-6): """ Args: entry (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry): An entry object entry_id (): concentration (): """ self.entry = entry if isinstance(entry, IonEntry): self.concentration = concentration self.phase_type = "Ion" self.charge = entry.ion.charge else: self.concentration = 1.0 self.phase_type = "Solid" self.charge = 0.0 self.uncorrected_energy = entry.energy if entry_id is not None: self.entry_id = entry_id elif hasattr(entry, "entry_id") and entry.entry_id: self.entry_id = entry.entry_id else: self.entry_id = None @property def npH(self): """ Returns: """ return self.entry.composition.get("H", 0.) - 2 * self.entry.composition.get("O", 0.) @property def nH2O(self): """ Returns: Number of H2O. """ return self.entry.composition.get("O", 0.) @property def nPhi(self): """ Returns: Number of H2O. 
""" return self.npH - self.charge @property def name(self): """ Returns: Name for entry """ if self.phase_type == "Solid": return self.entry.composition.reduced_formula + "(s)" elif self.phase_type == "Ion": return self.entry.name @property def energy(self): """ returns energy Returns (float): total energy of the pourbaix entry (at pH, V = 0 vs. SHE) """ # Note: this implicitly depends on formation energies as input return self.uncorrected_energy + self.conc_term - (MU_H2O * self.nH2O) @property def energy_per_atom(self): """ energy per atom of the pourbaix entry Returns (float): energy per atom """ return self.energy / self.composition.num_atoms def energy_at_conditions(self, pH, V): """ Get free energy for a given pH and V Args: pH (float): pH at which to evaluate free energy V (float): voltage at which to evaluate free energy Returns: free energy at conditions """ return self.energy + self.npH * PREFAC * pH + self.nPhi * V def get_element_fraction(self, element): """ Gets the elemental fraction of a given non-OH element Args: element (Element or str): string or element corresponding to element to get from composition Returns: fraction of element / sum(all non-OH elements) """ return self.composition.get(element) * self.normalization_factor @property def normalized_energy(self): """ Returns: energy normalized by number of non H or O atoms, e. g. for Zn2O6, energy / 2 or for AgTe3(OH)3, energy / 4 """ return self.energy * self.normalization_factor def normalized_energy_at_conditions(self, pH, V): """ Energy at an electrochemical condition, compatible with numpy arrays for pH/V input Args: pH (float): pH at condition V (float): applied potential at condition Returns: energy normalized by number of non-O/H atoms at condition """ return self.energy_at_conditions(pH, V) * self.normalization_factor @property def conc_term(self): """ Returns the concentration contribution to the free energy, and should only be present when there are ions in the entry """ return PREFAC * np.log10(self.concentration) # TODO: not sure if these are strictly necessary with refactor def as_dict(self): """ Returns dict which contains Pourbaix Entry data. Note that the pH, voltage, H2O factors are always calculated when constructing a PourbaixEntry object. """ d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__} if isinstance(self.entry, IonEntry): d["entry_type"] = "Ion" else: d["entry_type"] = "Solid" d["entry"] = self.entry.as_dict() d["concentration"] = self.concentration d["entry_id"] = self.entry_id return d @classmethod def from_dict(cls, d): """ Invokes """ entry_type = d["entry_type"] if entry_type == "Ion": entry = IonEntry.from_dict(d["entry"]) else: entry = PDEntry.from_dict(d["entry"]) entry_id = d["entry_id"] concentration = d["concentration"] return PourbaixEntry(entry, entry_id, concentration) @property def normalization_factor(self): """ Sum of number of atoms minus the number of H and O in composition """ return 1.0 / (self.num_atoms - self.composition.get('H', 0) - self.composition.get('O', 0)) @property def composition(self): """ Returns composition """ return self.entry.composition @property def num_atoms(self): """ Return number of atoms in current formula. 
Useful for normalization """ return self.composition.num_atoms def __repr__(self): return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {} ".format( self.entry.composition, self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id) def __str__(self): return self.__repr__() class MultiEntry(PourbaixEntry): """ PourbaixEntry-like object for constructing multi-elemental Pourbaix diagrams. """ def __init__(self, entry_list, weights=None): """ Initializes a MultiEntry. Args: entry_list ([PourbaixEntry]): List of component PourbaixEntries weights ([float]): Weights associated with each entry. Default is None """ if weights is None: self.weights = [1.0] * len(entry_list) else: self.weights = weights self.entry_list = entry_list @lru_cache() def __getattr__(self, item): """ Because most of the attributes here are just weighted averages of the entry_list, we save some space by having a set of conditionals to define the attributes """ # Attributes that are weighted averages of entry attributes if item in ["energy", "npH", "nH2O", "nPhi", "conc_term", "composition", "uncorrected_energy"]: # TODO: Composition could be changed for compat with sum if item == "composition": start = Composition({}) else: start = 0 return sum([getattr(e, item) * w for e, w in zip(self.entry_list, self.weights)], start) # Attributes that are just lists of entry attributes elif item in ["entry_id", "phase_type"]: return [getattr(e, item) for e in self.entry_list] # normalization_factor, num_atoms should work from superclass return self.__getattribute__(item) @property def name(self): """ MultiEntry name, i. e. the name of each entry joined by ' + ' """ return " + ".join([e.name for e in self.entry_list]) def __repr__(self): return "Multiple Pourbaix Entry: energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {}, species: {}" \ .format(self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id, self.name) def __str__(self): return self.__repr__() def as_dict(self): """ Returns: MSONable dict """ return {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "entry_list": [e.as_dict() for e in self.entry_list], "weights": self.weights} @classmethod def from_dict(cls, d): """ Args: d (): Dict representation Returns: MultiEntry """ entry_list = [PourbaixEntry.from_dict(e) for e in d.get("entry_list")] return cls(entry_list, d.get("weights")) # TODO: this class isn't particularly useful in its current form, could be # refactored to include information about the reference solid class IonEntry(PDEntry): """ Object similar to PDEntry, but contains an Ion object instead of a Composition object. .. attribute:: name A name for the entry. This is the string shown in the phase diagrams. By default, this is the reduced formula for the composition, but can be set to some other string for display purposes. """ def __init__(self, ion, energy, name=None, attribute=None): """ Args: ion: Ion object energy: Energy for composition. name: Optional parameter to name the entry. Defaults to the chemical formula. """ self.ion = ion # Auto-assign name name = name if name else self.ion.reduced_formula super(IonEntry, self).__init__( composition=ion.composition, energy=energy, name=name, attribute=attribute) @classmethod def from_dict(cls, d): """ Returns an IonEntry object from a dict. 
""" return IonEntry(Ion.from_dict(d["ion"]), d["energy"], d.get("name"), d.get("attribute")) def as_dict(self): """ Creates a dict of composition, energy, and ion name """ d = {"ion": self.ion.as_dict(), "energy": self.energy, "name": self.name} return d def __repr__(self): return "IonEntry : {} with energy = {:.4f}".format(self.composition, self.energy) def __str__(self): return self.__repr__() def ion_or_solid_comp_object(formula): """ Returns either an ion object or composition object given a formula. Args: formula: String formula. Eg. of ion: NaOH(aq), Na[+]; Eg. of solid: Fe2O3(s), Fe(s), Na2O Returns: Composition/Ion object """ m = re.search(r"\[([^\[\]]+)\]|\(aq\)", formula) if m: comp_obj = Ion.from_formula(formula) elif re.search(r"\(s\)", formula): comp_obj = Composition(formula[:-3]) else: comp_obj = Composition(formula) return comp_obj ELEMENTS_HO = {Element('H'), Element('O')} # TODO: the solids filter breaks some of the functionality of the # heatmap plotter, because the reference states for decomposition # don't include oxygen/hydrogen in the OER/HER regions # TODO: create a from_phase_diagram class method for non-formation energy # invocation # TODO: invocation from a MultiEntry entry list could be a bit more robust # TODO: serialization is still a bit rough around the edges class PourbaixDiagram(MSONable): """ Class to create a Pourbaix diagram from entries """ def __init__(self, entries, comp_dict=None, conc_dict=None, filter_solids=False, nproc=None): """ Args: entries ([PourbaixEntry] or [MultiEntry]): Entries list containing Solids and Ions or a list of MultiEntries comp_dict ({str: float}): Dictionary of compositions, defaults to equal parts of each elements conc_dict ({str: float}): Dictionary of ion concentrations, defaults to 1e-6 for each element filter_solids (bool): applying this filter to a pourbaix diagram ensures all included phases are filtered by stability on the compositional phase diagram. This breaks some of the functionality of the analysis, though, so use with caution. nproc (int): number of processes to generate multientries with in parallel. Defaults to None (serial processing) """ entries = deepcopy(entries) # Get non-OH elements self.pbx_elts = set(itertools.chain.from_iterable( [entry.composition.elements for entry in entries])) self.pbx_elts = list(self.pbx_elts - ELEMENTS_HO) self.dim = len(self.pbx_elts) - 1 # Process multientry inputs if isinstance(entries[0], MultiEntry): self._processed_entries = entries # Extract individual entries single_entries = list(set(itertools.chain.from_iterable( [e.entry_list for e in entries]))) self._unprocessed_entries = single_entries self._filtered_entries = single_entries self._conc_dict = None self._elt_comp = {k: v for k, v in entries[0].composition.items() if k not in ELEMENTS_HO} self._multielement = True # Process single entry inputs else: # Set default conc/comp dicts if not comp_dict: comp_dict = {elt.symbol: 1. 
/ len(self.pbx_elts) for elt in self.pbx_elts} if not conc_dict: conc_dict = {elt.symbol: 1e-6 for elt in self.pbx_elts} self._conc_dict = conc_dict self._elt_comp = comp_dict self.pourbaix_elements = self.pbx_elts solid_entries = [entry for entry in entries if entry.phase_type == "Solid"] ion_entries = [entry for entry in entries if entry.phase_type == "Ion"] # If a conc_dict is specified, override individual entry concentrations for entry in ion_entries: ion_elts = list(set(entry.composition.elements) - ELEMENTS_HO) # TODO: the logic here for ion concentration setting is in two # places, in PourbaixEntry and here, should be consolidated if len(ion_elts) == 1: entry.concentration = conc_dict[ion_elts[0].symbol] \ * entry.normalization_factor elif len(ion_elts) > 1 and not entry.concentration: raise ValueError("Elemental concentration not compatible " "with multi-element ions") self._unprocessed_entries = solid_entries + ion_entries if not len(solid_entries + ion_entries) == len(entries): raise ValueError("All supplied entries must have a phase type of " "either \"Solid\" or \"Ion\"") if filter_solids: # O is 2.46 b/c pbx entry finds energies referenced to H2O entries_HO = [ComputedEntry('H', 0), ComputedEntry('O', 2.46)] solid_pd = PhaseDiagram(solid_entries + entries_HO) solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO)) self._filtered_entries = solid_entries + ion_entries if len(comp_dict) > 1: self._multielement = True self._processed_entries = self._preprocess_pourbaix_entries( self._filtered_entries, nproc=nproc) else: self._processed_entries = self._filtered_entries self._multielement = False self._stable_domains, self._stable_domain_vertices = \ self.get_pourbaix_domains(self._processed_entries) def _convert_entries_to_points(self, pourbaix_entries): """ Args: pourbaix_entries ([PourbaixEntry]): list of pourbaix entries to process into vectors in nph-nphi-composition space Returns: list of vectors, [[nph, nphi, e0, x1, x2, ..., xn-1]] corresponding to each entry in nph-nphi-composition space """ vecs = [[entry.npH, entry.nPhi, entry.energy] + [entry.composition.get(elt) for elt in self.pbx_elts[:-1]] for entry in pourbaix_entries] vecs = np.array(vecs) norms = np.transpose([[entry.normalization_factor for entry in pourbaix_entries]]) vecs *= norms return vecs def _get_hull_in_nph_nphi_space(self, entries): """ Generates convex hull of pourbaix diagram entries in composition, npH, and nphi space. This enables filtering of multi-entries such that only compositionally stable combinations of entries are included. 
Args: entries ([PourbaixEntry]): list of PourbaixEntries to construct the convex hull Returns: list of entries and stable facets corresponding to that list of entries """ ion_entries = [entry for entry in entries if entry.phase_type == "Ion"] solid_entries = [entry for entry in entries if entry.phase_type == "Solid"] # Pre-filter solids based on min at each composition logger.debug("Pre-filtering solids by min energy at each composition") sorted_entries = sorted( solid_entries, key=lambda x: (x.composition.reduced_composition, x.entry.energy_per_atom)) grouped_by_composition = itertools.groupby( sorted_entries, key=lambda x: x.composition.reduced_composition) min_entries = [list(grouped_entries)[0] for comp, grouped_entries in grouped_by_composition] min_entries += ion_entries logger.debug("Constructing nph-nphi-composition points for qhull") vecs = self._convert_entries_to_points(min_entries) maxes = np.max(vecs[:, :3], axis=0) extra_point = np.concatenate( [maxes, np.ones(self.dim) / self.dim], axis=0) # Add padding for extra point pad = 1000 extra_point[2] += pad points = np.concatenate([vecs, np.array([extra_point])], axis=0) logger.debug("Constructing convex hull in nph-nphi-composition space") hull = ConvexHull(points, qhull_options="QJ i") # Create facets and remove top facets = [facet for facet in hull.simplices if not len(points) - 1 in facet] if self.dim > 1: logger.debug("Filtering facets by pourbaix composition") valid_facets = [] for facet in facets: comps = vecs[facet][:, 3:] full_comps = np.concatenate([ comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)], axis=1) # Ensure an compositional interior point exists in the simplex if np.linalg.matrix_rank(full_comps) > self.dim: valid_facets.append(facet) else: valid_facets = facets return min_entries, valid_facets def _preprocess_pourbaix_entries(self, entries, nproc=None): """ Generates multi-entries for pourbaix diagram Args: entries ([PourbaixEntry]): list of PourbaixEntries to preprocess into MultiEntries nproc (int): number of processes to be used in parallel treatment of entry combos Returns: ([MultiEntry]) list of stable MultiEntry candidates """ # Get composition tot_comp = Composition(self._elt_comp) min_entries, valid_facets = self._get_hull_in_nph_nphi_space(entries) combos = [] for facet in valid_facets: for i in range(1, self.dim + 2): these_combos = list() for combo in itertools.combinations(facet, i): these_entries = [min_entries[i] for i in combo] these_combos.append(frozenset(these_entries)) combos.append(these_combos) all_combos = set(itertools.chain.from_iterable(combos)) list_combos = [] for i in all_combos: list_combos.append(list(i)) all_combos = list_combos multi_entries = [] # Parallel processing of multi-entry generation if nproc is not None: f = partial(self.process_multientry, prod_comp=tot_comp) with Pool(nproc) as p: multi_entries = list(tqdm(p.imap(f, all_combos), total=len(all_combos))) multi_entries = list(filter(bool, multi_entries)) else: # Serial processing of multi-entry generation for combo in tqdm(all_combos): multi_entry = self.process_multientry(combo, prod_comp=tot_comp) if multi_entry: multi_entries.append(multi_entry) return multi_entries def _generate_multielement_entries(self, entries, nproc=None): """ Create entries for multi-element Pourbaix construction. This works by finding all possible linear combinations of entries that can result in the specified composition from the initialized comp_dict. 
Args: entries ([PourbaixEntries]): list of pourbaix entries to process into MultiEntries nproc (int): number of processes to be used in parallel treatment of entry combos """ N = len(self._elt_comp) # No. of elements total_comp = Composition(self._elt_comp) # generate all combinations of compounds that have all elements entry_combos = [itertools.combinations( entries, j + 1) for j in range(N)] entry_combos = itertools.chain.from_iterable(entry_combos) entry_combos = filter(lambda x: total_comp < MultiEntry(x).composition, entry_combos) # Generate and filter entries processed_entries = [] total = sum([comb(len(entries), j + 1) for j in range(N)]) if total > 1e6: warnings.warn("Your pourbaix diagram includes {} entries and may " "take a long time to generate.".format(total)) # Parallel processing of multi-entry generation if nproc is not None: f = partial(self.process_multientry, prod_comp=total_comp) with Pool(nproc) as p: processed_entries = list(tqdm(p.imap(f, entry_combos), total=total)) processed_entries = list(filter(bool, processed_entries)) # Serial processing of multi-entry generation else: for entry_combo in entry_combos: processed_entry = self.process_multientry(entry_combo, total_comp) if processed_entry is not None: processed_entries.append(processed_entry) return processed_entries @staticmethod def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4): """ Static method for finding a multientry based on a list of entries and a product composition. Essentially checks to see if a valid aqueous reaction exists between the entries and the product composition and returns a MultiEntry with weights according to the coefficients if so. Args: entry_list ([Entry]): list of entries from which to create a MultiEntry prod_comp (Composition): composition constraint for setting weights of MultiEntry coeff_threshold (float): threshold of stoichiometric coefficients to filter, if weights are lower than this value, the entry is not returned """ dummy_oh = [Composition("H"), Composition("O")] try: # Get balanced reaction coeffs, ensuring all < 0 or conc thresh # Note that we get reduced compositions for solids and non-reduced # compositions for ions because ions aren't normalized due to # their charge state. entry_comps = [e.composition for e in entry_list] rxn = Reaction(entry_comps + dummy_oh, [prod_comp]) react_coeffs = [-rxn.get_coeff(comp) for comp in entry_comps] all_coeffs = react_coeffs + [rxn.get_coeff(prod_comp)] # Check if reaction coeff threshold met for pourbaix compounds # All reactant/product coefficients must be positive nonzero if all([coeff > coeff_threshold for coeff in all_coeffs]): return MultiEntry(entry_list, weights=react_coeffs) else: return None except ReactionError: return None @staticmethod def get_pourbaix_domains(pourbaix_entries, limits=None): """ Returns a set of pourbaix stable domains (i. e. polygons) in pH-V space from a list of pourbaix_entries This function works by using scipy's HalfspaceIntersection function to construct all of the 2-D polygons that form the boundaries of the planes corresponding to individual entry gibbs free energies as a function of pH and V. Hyperplanes of the form a*pH + b*V + 1 - g(0, 0) are constructed and supplied to HalfspaceIntersection, which then finds the boundaries of each pourbaix region using the intersection points. 
Args: pourbaix_entries ([PourbaixEntry]): Pourbaix entries with which to construct stable pourbaix domains limits ([[float]]): limits in which to do the pourbaix analysis Returns: Returns a dict of the form {entry: [boundary_points]}. The list of boundary points are the sides of the N-1 dim polytope bounding the allowable ph-V range of each entry. """ if limits is None: limits = [[-2, 16], [-4, 4]] # Get hyperplanes hyperplanes = [np.array([-PREFAC * entry.npH, -entry.nPhi, 0, -entry.energy]) * entry.normalization_factor for entry in pourbaix_entries] hyperplanes = np.array(hyperplanes) hyperplanes[:, 2] = 1 max_contribs = np.max(np.abs(hyperplanes), axis=0) g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1]) # Add border hyperplanes and generate HalfspaceIntersection border_hyperplanes = [[-1, 0, 0, limits[0][0]], [1, 0, 0, -limits[0][1]], [0, -1, 0, limits[1][0]], [0, 1, 0, -limits[1][1]], [0, 0, -1, 2 * g_max]] hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes]) interior_point = np.average(limits, axis=1).tolist() + [g_max] hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point)) # organize the boundary points by entry pourbaix_domains = {entry: [] for entry in pourbaix_entries} for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets): for v in facet: if v < len(pourbaix_entries): this_entry = pourbaix_entries[v] pourbaix_domains[this_entry].append(intersection) # Remove entries with no pourbaix region pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v} pourbaix_domain_vertices = {} for entry, points in pourbaix_domains.items(): points = np.array(points)[:, :2] # Initial sort to ensure consistency points = points[np.lexsort(np.transpose(points))] center = np.average(points, axis=0) points_centered = points - center # Sort points by cross product of centered points, # isn't strictly necessary but useful for plotting tools points_centered = sorted(points_centered, key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0])) points = points_centered + center # Create simplices corresponding to pourbaix boundary simplices = [Simplex(points[indices]) for indices in ConvexHull(points).simplices] pourbaix_domains[entry] = simplices pourbaix_domain_vertices[entry] = points return pourbaix_domains, pourbaix_domain_vertices def find_stable_entry(self, pH, V): """ Finds stable entry at a pH,V condition Args: pH (float): pH to find stable entry V (float): V to find stable entry Returns: """ energies_at_conditions = [e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries] return self.stable_entries[np.argmin(energies_at_conditions)] def get_decomposition_energy(self, entry, pH, V): """ Finds decomposition to most stable entries in eV/atom, supports vectorized inputs for pH and V Args: entry (PourbaixEntry): PourbaixEntry corresponding to compound to find the decomposition for pH (float, [float]): pH at which to find the decomposition V (float, [float]): voltage at which to find the decomposition Returns: Decomposition energy for the entry, i. e. 
the energy above the "pourbaix hull" in eV/atom at the given conditions """ # Check composition consistency between entry and Pourbaix diagram: pbx_comp = Composition(self._elt_comp).fractional_composition entry_pbx_comp = Composition( {elt: coeff for elt, coeff in entry.composition.items() if elt not in ELEMENTS_HO}).fractional_composition if entry_pbx_comp != pbx_comp: raise ValueError("Composition of stability entry does not match " "Pourbaix Diagram") entry_normalized_energy = entry.normalized_energy_at_conditions(pH, V) hull_energy = self.get_hull_energy(pH, V) decomposition_energy = entry_normalized_energy - hull_energy # Convert to eV/atom instead of eV/normalized formula unit decomposition_energy /= entry.normalization_factor decomposition_energy /= entry.composition.num_atoms return decomposition_energy def get_hull_energy(self, pH, V): """ Gets the minimum energy of the pourbaix "basin" that is formed from the stable pourbaix planes. Vectorized. Args: pH (float or [float]): pH at which to find the hull energy V (float or [float]): V at which to find the hull energy Returns: (float or [float]) minimum pourbaix energy at conditions """ all_gs = np.array([e.normalized_energy_at_conditions( pH, V) for e in self.stable_entries]) base = np.min(all_gs, axis=0) return base def get_stable_entry(self, pH, V): """ Gets the stable entry at a given pH, V condition Args: pH (float): pH at a given condition V (float): V at a given condition Returns: (PourbaixEntry or MultiEntry): pourbaix or multi-entry corresponding ot the minimum energy entry at a given pH, V condition """ all_gs = np.array([e.normalized_energy_at_conditions( pH, V) for e in self.stable_entries]) return self.stable_entries[np.argmin(all_gs)] @property def stable_entries(self): """ Returns the stable entries in the Pourbaix diagram. """ return list(self._stable_domains.keys()) @property def unstable_entries(self): """ Returns all unstable entries in the Pourbaix diagram """ return [e for e in self.all_entries if e not in self.stable_entries] @property def all_entries(self): """ Return all entries used to generate the pourbaix diagram """ return self._processed_entries @property def unprocessed_entries(self): """ Return unprocessed entries """ return self._unprocessed_entries def as_dict(self, include_unprocessed_entries=False): """ Args: include_unprocessed_entries (): Whether to include unprocessed entries. Returns: MSONable dict. """ if include_unprocessed_entries: entries = [e.as_dict() for e in self._unprocessed_entries] else: entries = [e.as_dict() for e in self._processed_entries] d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "entries": entries, "comp_dict": self._elt_comp, "conc_dict": self._conc_dict} return d @classmethod def from_dict(cls, d): """ Args: d (): Dict representation. Returns: PourbaixDiagram """ decoded_entries = MontyDecoder().process_decoded(d['entries']) return cls(decoded_entries, d.get('comp_dict'), d.get('conc_dict')) class PourbaixPlotter: """ A plotter class for phase diagrams. """ def __init__(self, pourbaix_diagram): """ Args: pourbaix_diagram (PourbaixDiagram): A PourbaixDiagram object. """ self._pbx = pourbaix_diagram def show(self, *args, **kwargs): """ Shows the pourbaix plot Args: *args: args to get_pourbaix_plot **kwargs: kwargs to get_pourbaix_plot Returns: None """ plt = self.get_pourbaix_plot(*args, **kwargs) plt.show() def get_pourbaix_plot(self, limits=None, title="", label_domains=True, plt=None): """ Plot Pourbaix diagram. 
Args: limits: 2D list containing limits of the Pourbaix diagram of the form [[xlo, xhi], [ylo, yhi]] title (str): Title to display on plot label_domains (bool): whether to label pourbaix domains plt (pyplot): Pyplot instance for plotting Returns: plt (pyplot) - matplotlib plot object with pourbaix diagram """ if limits is None: limits = [[-2, 16], [-3, 3]] plt = plt or pretty_plot(16) xlim = limits[0] ylim = limits[1] h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC], [xlim[1], -xlim[1] * PREFAC]]) o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23], [xlim[1], -xlim[1] * PREFAC + 1.23]]) neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]]) V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]]) ax = plt.gca() ax.set_xlim(xlim) ax.set_ylim(ylim) lw = 3 plt.plot(h_line[0], h_line[1], "r--", linewidth=lw) plt.plot(o_line[0], o_line[1], "r--", linewidth=lw) plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw) plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw) for entry, vertices in self._pbx._stable_domain_vertices.items(): center = np.average(vertices, axis=0) x, y = np.transpose(np.vstack([vertices, vertices[0]])) plt.plot(x, y, 'k-', linewidth=lw) if label_domains: plt.annotate(generate_entry_label(entry), center, ha='center', va='center', fontsize=20, color="b").draggable() plt.xlabel("pH") plt.ylabel("E (V)") plt.title(title, fontsize=20, fontweight='bold') return plt def plot_entry_stability(self, entry, pH_range=None, pH_resolution=100, V_range=None, V_resolution=100, e_hull_max=1, cmap='RdYlBu_r', **kwargs): """ Args: entry (): pH_range (): pH_resolution (): V_range (): V_resolution (): e_hull_max (): cmap (): **kwargs (): Returns: """ if pH_range is None: pH_range = [-2, 16] if V_range is None: V_range = [-3, 3] # plot the Pourbaix diagram plt = self.get_pourbaix_plot(**kwargs) pH, V = np.mgrid[pH_range[0]:pH_range[1]:pH_resolution * 1j, V_range[0]:V_range[1]:V_resolution * 1j] stability = self._pbx.get_decomposition_energy(entry, pH, V) # Plot stability map plt.pcolor(pH, V, stability, cmap=cmap, vmin=0, vmax=e_hull_max) cbar = plt.colorbar() cbar.set_label("Stability of {} (eV/atom)".format( generate_entry_label(entry))) # Set ticklabels # ticklabels = [t.get_text() for t in cbar.ax.get_yticklabels()] # ticklabels[-1] = '>={}'.format(ticklabels[-1]) # cbar.ax.set_yticklabels(ticklabels) return plt def domain_vertices(self, entry): """ Returns the vertices of the Pourbaix domain. Args: entry: Entry for which domain vertices are desired Returns: list of vertices """ return self._pbx._stable_domain_vertices[entry] def generate_entry_label(entry): """ Generates a label for the pourbaix plotter Args: entry (PourbaixEntry or MultiEntry): entry to get a label for """ if isinstance(entry, MultiEntry): return " + ".join([latexify_ion(latexify(e.name)) for e in entry.entry_list]) else: return latexify_ion(latexify(entry.name)) def latexify_ion(formula): """ Convert a formula to latex format. Args: formula (str): Formula Returns: Latex string. """ return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
repo_name: gVallverdu/pymatgen
path: pymatgen/analysis/pourbaix_diagram.py
language: Python
license: mit
size: 41,344
keyword: [ "pymatgen" ]
text_hash: 1dbd25021f6bf1921f8c68bd0f689a57ac9eda616729383c3c0240e6d978c56d
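This row is pymatgen's pourbaix_diagram module. Per its own code, a PourbaixEntry's corrected energy is uncorrected_energy + PREFAC*log10(conc) - MU_H2O*nH2O, and energy_at_conditions(pH, V) adds npH*PREFAC*pH + nPhi*V, with npH = nH - 2*nO and nPhi = npH - charge. A minimal standalone sketch of that arithmetic using the module's constants (MU_H2O = -2.4583, PREFAC = 0.0591); the inputs in the example call are hypothetical numbers, and this is no substitute for the class itself:

import math

MU_H2O = -2.4583   # formation free energy of water (eV), from the module above
PREFAC = 0.0591    # Nernst slope RT*ln(10)/F at 298 K (V), from the module above

def pourbaix_energy(e0, n_H, n_O, charge, conc, pH, V):
    # Mirrors PourbaixEntry: npH and nPhi are the proton and electron
    # coefficients; solids use conc = 1.0 so the concentration term vanishes.
    npH = n_H - 2 * n_O
    nPhi = npH - charge
    nH2O = n_O
    g0 = e0 + PREFAC * math.log10(conc) - MU_H2O * nH2O
    return g0 + npH * PREFAC * pH + nPhi * V

# Hypothetical ion at pH 7 and V = 0.5 vs. SHE (numbers illustrative only):
print(pourbaix_energy(e0=-1.5, n_H=0, n_O=0, charge=2.0, conc=1e-6, pH=7.0, V=0.5))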
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
module: k8s

short_description: Manage Kubernetes (K8s) objects

version_added: "2.6"

author:
    - "Chris Houseknecht (@chouseknecht)"
    - "Fabian von Feilitzsch (@fabianvf)"

description:
  - Use the OpenShift Python client to perform CRUD operations on K8s objects.
  - Pass the object definition from a source file or inline. See examples for reading
    files and using Jinja templates or vault-encrypted files.
  - Access to the full range of K8s APIs.
  - Use the M(k8s_info) module to obtain a list of items about an object of type C(kind)
  - Authenticate using either a config file, certificates, password or token.
  - Supports check mode.

extends_documentation_fragment:
  - k8s_state_options
  - k8s_name_options
  - k8s_resource_options
  - k8s_auth_options

notes:
  - If you are trying to remove an item from an associative array/dictionary, for example a label or
    an annotation, you will need to explicitly set the value of the item to be removed to `null`.
    Simply deleting the entry in the dictionary will not remove it from openshift or kubernetes.

options:
  merge_type:
    description:
    - Whether to override the default patch merge approach with a specific type. By default, the
      strategic merge will typically be used.
    - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge.
      You may want to use C(merge) if you see "strategic merge patch format is not supported"
    - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
    - Requires openshift >= 0.6.2
    - If more than one merge_type is given, the merge_types will be tried in order
    - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using
      the same parameters on resource kinds that combine Custom Resources and built-in resources. For
      openshift < 0.6.2, the default is simply C(strategic-merge).
    - mutually exclusive with C(apply)
    choices:
    - json
    - merge
    - strategic-merge
    type: list
    version_added: "2.7"
  wait:
    description:
    - Whether to wait for certain resource kinds to end up in the desired state. By default the module
      exits once Kubernetes has received the request
    - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for
      C(state=absent) for all resource kinds.
    - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition)
      is set.
    default: no
    type: bool
    version_added: "2.8"
  wait_timeout:
    description:
    - How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait)
      is not set.
    default: 120
    version_added: "2.8"
  wait_condition:
    description:
    - Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is
      set to False.
    suboptions:
      type:
        description:
        - The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready)
          condition (among others)
        - Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition)
          field will be ignored.
        - The possible types for a condition are specific to each resource type in Kubernetes. See the
          API documentation of the status field for a given resource to see possible choices.
      status:
        description:
        - The value of the status field in your desired condition.
        - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the
          C(Unknown) status.
        choices:
        - True
        - False
        - Unknown
      reason:
        description:
        - The value of the reason field in your desired condition
        - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the
          C(DeploymentPaused) reason.
        - The possible reasons in a condition are specific to each resource type in Kubernetes. See the
          API documentation of the status field for a given resource to see possible choices.
    version_added: "2.8"
  validate:
    description:
    - how (if at all) to validate the resource definition against the kubernetes schema.
      Requires the kubernetes-validate python module
    suboptions:
      fail_on_error:
        description: whether to fail on validation errors.
        required: yes
        type: bool
      version:
        description: version of Kubernetes to validate against. defaults to Kubernetes server version
      strict:
        description: whether to fail when passing unexpected properties
        default: no
        type: bool
    version_added: "2.8"
  append_hash:
    description:
    - Whether to append a hash to a resource name for immutability purposes
    - Applies only to ConfigMap and Secret resources
    - The parameter will be silently ignored for other resource kinds
    - The full definition of an object is needed to generate the hash - this means that deleting an
      object created with append_hash will only work if the same object is passed with state=absent
      (alternatively, just use state=absent with the name including the generated hash and
      append_hash=no)
    type: bool
    version_added: "2.8"
  apply:
    description:
    - C(apply) compares the desired resource definition with the previously supplied resource
      definition, ignoring properties that are automatically generated
    - C(apply) works better with Services than 'force=yes'
    - C(apply) defaults to True if the openshift library is new enough to support it (0.9.0 or newer)
    - mutually exclusive with C(merge_type)
    type: bool
    version_added: "2.9"

requirements:
  - "python >= 2.7"
  - "openshift >= 0.6"
  - "PyYAML >= 3.11"
'''

EXAMPLES = '''
- name: Create a k8s namespace
  k8s:
    name: testing
    api_version: v1
    kind: Namespace
    state: present

- name: Create a Service object from an inline definition
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: web
        namespace: testing
        labels:
          app: galaxy
          service: web
      spec:
        selector:
          app: galaxy
          service: web
        ports:
        - protocol: TCP
          targetPort: 8000
          name: port-8000-tcp
          port: 8000

- name: Create a Service object by reading the definition from a file
  k8s:
    state: present
    src: /testing/service.yml

- name: Remove an existing Service object
  k8s:
    state: absent
    api_version: v1
    kind: Service
    namespace: testing
    name: web

# Passing the object definition from a file

- name: Create a Deployment by reading the definition from a local file
  k8s:
    state: present
    src: /testing/deployment.yml

- name: >-
    Read definition file from the Ansible controller file system.
    If the definition file has been encrypted with Ansible Vault it will
    automatically be decrypted.
  k8s:
    state: present
    definition: "{{ lookup('file', '/testing/deployment.yml') }}"

- name: Read definition file from the Ansible controller file system after Jinja templating
  k8s:
    state: present
    definition: "{{ lookup('template', '/testing/deployment.yml') }}"

- name: fail on validation errors
  k8s:
    state: present
    definition: "{{ lookup('template', '/testing/deployment.yml') }}"
    validate:
      fail_on_error: yes

- name: warn on validation errors, check for unexpected properties
  k8s:
    state: present
    definition: "{{ lookup('template', '/testing/deployment.yml') }}"
    validate:
      fail_on_error: no
      strict: yes
'''

RETURN = '''
result:
  description:
  - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
  returned: success
  type: complex
  contains:
    api_version:
      description: The versioned schema of this representation of an object.
      returned: success
      type: str
    kind:
      description: Represents the REST resource this object represents.
      returned: success
      type: str
    metadata:
      description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
      returned: success
      type: complex
    spec:
      description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
      returned: success
      type: complex
    status:
      description: Current status details for the object.
      returned: success
      type: complex
    items:
      description: Returned only when multiple yaml documents are passed to src or resource_definition
      returned: when resource_definition or src contains list of objects
      type: list
    duration:
      description: elapsed time of task in seconds
      returned: when C(wait) is true
      type: int
      sample: 48
'''

from ansible.module_utils.k8s.raw import KubernetesRawModule


def main():
    KubernetesRawModule().execute_module()


if __name__ == '__main__':
    main()
repo_name: pgmillon/ansible
path: lib/ansible/modules/clustering/k8s/k8s.py
language: Python
license: gpl-3.0
size: 9,564
keyword: [ "Galaxy" ]
text_hash: 851c3231d2ad171f2ebb2f21e634799159c1f4422573b2103e4303c52cffe88c
#!/usr/bin/python
"""
Likelihood functions for various data.
"""
import numpy as np
import pylab as P
from scipy.special import erf


def load_mauch_lf(fname="../lumfns/lumfunc_6dfgs.dat", h=0.7,
                  starburst_corr=False):
    """
    Load 6dFGS star-forming radio galaxy data from Mauch & Sadler
    (astro-ph/0612018). (z_median = 0.035, assumed h=0.7, omega_m=0.3)
    """
    h_ms = 0.7

    # Load data from file
    log10Lum, log10Phi, errp, errm = np.genfromtxt(fname).T
    fac = 2.5/np.log(10.)  # Convert d/d(mag) to 1/d(logL)

    # Convert luminosity units
    L = 10.**log10Lum * 1e7  # erg/s/Hz

    # Approximate correction to luminosity at low-z for different cosmologies,
    # assuming d_L ~ c/H_0.
    L *= (h / h_ms)**2.

    # Approximate correction to the volume, taking into account different H_0
    Vfac = (h / h_ms)**3.

    # Calculate Phi, and rescale log-errors
    Phi = fac * Vfac * 10.**log10Phi

    # Unit rescaling *should not* be applied to log-errors
    # (the rescaling is already included in the units of Phi)
    #errp += np.log10(fac * Vfac)
    #errm += np.log10(fac * Vfac)

    # Apply correction to remove "starburst" galaxies assumed to be
    # contaminating the LF. Based on fit to the ratio of "normal" star-forming
    # to "total" (normal+starburst) galaxy radio LFs (Yun, Reddy & Condon 2001).
    if starburst_corr:
        Lref = 6e29  # Fitting parameter
        corr = 0.5 * (1. - erf(L/Lref - 1.))
        Phi *= corr
        # FIXME: Does not correct the errorbars

    return L, Phi, errp, errm


def load_gama_lf(band, froot="../lumfns/lf%s_z0_driver12.data", h=0.7):
    """
    Load GAMA optical luminosity functions from Driver et al. (2012).
    (assumes omega_m = 0.27, and in h units)
    """
    # Load GAMA binned luminosity fn. for a given band
    gama_mag, gama_n, gama_err, gama_ngal = np.genfromtxt(froot % band).T
    gama_mag += 5.*np.log10(h)
    gama_n *= h**3.    # Convert (Mpc/h)^-3 -> (Mpc)^-3
    gama_err *= h**3.  # FIXME: No conversion for different Omega_M

    # Remove unconstrained bins
    idxs = np.where(gama_err > 0.)
    return gama_mag[idxs], gama_n[idxs], gama_err[idxs]


def load_sdss_smf(froot="../lumfns/moustakas_smf.dat", h=0.7,
                  convert_errors=False, mstar_min=None):
    """
    Load SDSS-GALEX stellar mass functions (z = 0.01 - 0.2) from
    Moustakas et al. 2013 [1301.1688].
    """
    # Stellar mass function from Table 3, with columns for all, star-forming
    # only, and quiescent-only
    logms, \
        all_logphi, all_errp, all_errm, all_sig, all_N, \
        sf_logphi, sf_errp, sf_errm, sf_sig, sf_N, \
        qu_logphi, qu_errp, qu_errm, qu_sig, qu_N = np.genfromtxt(froot).T

    # Convert units
    # Stellar mass in units of log_10(h_70^-2 Msun), where
    # h_70 = H_0 / (70 km/s/Mpc). Convert to Msun units.
    logms += -np.log10((h / 0.7)**2.)
    ms = 10.**logms

    # Stellar mass function, in units of log_10(h_70^3 Mpc^-3 dex^-1)
    # Convert to log_10 (Mpc^-3), i.e. dn/dlogM/dV
    all_logphi += np.log10((h / 0.7)**3. / np.log(10.))
    sf_logphi += np.log10((h / 0.7)**3. / np.log(10.))
    qu_logphi += np.log10((h / 0.7)**3. / np.log(10.))

    # Convert from log-space
    all_phi = 10.**all_logphi
    sf_phi = 10.**sf_logphi
    qu_phi = 10.**qu_logphi

    # If requested, convert errors from log space
    if convert_errors:
        all_errp = all_phi * (10.**all_errp - 1.)
        sf_errp = sf_phi * (10.**sf_errp - 1.)
        qu_errp = qu_phi * (10.**qu_errp - 1.)
        all_errm = all_phi * (1. - 10.**all_errm)
        sf_errm = sf_phi * (1. - 10.**sf_errm)
        qu_errm = qu_phi * (1. - 10.**qu_errm)

    # Apply a cut (minimum) in mstar
    if mstar_min is not None:
        idxs = np.where(ms >= mstar_min)
        ms, sf_phi, sf_errp, sf_errm, qu_phi, qu_errp, qu_errm = \
            [f[idxs] for f in [ms, sf_phi, sf_errp, sf_errm,
                               qu_phi, qu_errp, qu_errm]]

    return ms, sf_phi, sf_errp, sf_errm, qu_phi, qu_errp, qu_errm
repo_name: philbull/ghost
path: likelihoods.py
language: Python
license: mit
size: 4,085
keyword: [ "Galaxy" ]
text_hash: 95f2703afe0fe9649c69729fe95f94d5a7aba18a548bffb0a3b8e177fc4cad2d
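In load_mauch_lf above, the optional starburst correction multiplies the luminosity function by corr = 0.5 * (1 - erf(L/Lref - 1)) with Lref = 6e29 erg/s/Hz, i.e. a smooth error-function cutoff centred on Lref. A short sketch of how the factor behaves on an illustrative luminosity grid (the grid values are made up; only Lref and the formula come from the file):

import numpy as np
from scipy.special import erf

# Starburst-contamination correction from load_mauch_lf above.
Lref = 6e29                    # fitting parameter (erg/s/Hz)
L = np.logspace(28., 31., 7)   # illustrative luminosity grid, not data
corr = 0.5 * (1. - erf(L / Lref - 1.))
for Li, ci in zip(L, corr):
    print("L = %.2e erg/s/Hz  ->  correction = %.3f" % (Li, ci))
# The factor is ~0.92 as L -> 0 (since erf(-1) ~ -0.84), exactly 0.5 at
# L = Lref, and tends to 0 well above Lref, suppressing the bright end.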
import sys from py._code.code import FormattedExcinfo import py import pytest import warnings import inspect import _pytest from _pytest._code.code import TerminalRepr from _pytest.compat import ( NOTSET, exc_clear, _format_args, getfslineno, get_real_func, is_generator, isclass, getimfunc, getlocation, getfuncargnames, safe_getattr, ) def pytest_sessionstart(session): session._fixturemanager = FixtureManager(session) scopename2class = {} scope2props = dict(session=()) scope2props["module"] = ("fspath", "module") scope2props["class"] = scope2props["module"] + ("cls",) scope2props["instance"] = scope2props["class"] + ("instance", ) scope2props["function"] = scope2props["instance"] + ("function", "keywords") def scopeproperty(name=None, doc=None): def decoratescope(func): scopename = name or func.__name__ def provide(self): if func.__name__ in scope2props[self.scope]: return func(self) raise AttributeError("%s not available in %s-scoped context" % ( scopename, self.scope)) return property(provide, None, None, func.__doc__) return decoratescope def pytest_namespace(): scopename2class.update({ 'class': pytest.Class, 'module': pytest.Module, 'function': pytest.Item, }) return { 'fixture': fixture, 'yield_fixture': yield_fixture, 'collect': {'_fillfuncargs': fillfixtures} } def get_scope_node(node, scope): cls = scopename2class.get(scope) if cls is None: if scope == "session": return node.session raise ValueError("unknown scope") return node.getparent(cls) def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): # this function will transform all collected calls to a functions # if they use direct funcargs (i.e. direct parametrization) # because we want later test execution to be able to rely on # an existing FixtureDef structure for all arguments. # XXX we can probably avoid this algorithm if we modify CallSpec2 # to directly care for creating the fixturedefs within its methods. if not metafunc._calls[0].funcargs: return # this function call does not have direct parametrization # collect funcargs of all callspecs into a list of values arg2params = {} arg2scope = {} for callspec in metafunc._calls: for argname, argvalue in callspec.funcargs.items(): assert argname not in callspec.params callspec.params[argname] = argvalue arg2params_list = arg2params.setdefault(argname, []) callspec.indices[argname] = len(arg2params_list) arg2params_list.append(argvalue) if argname not in arg2scope: scopenum = callspec._arg2scopenum.get(argname, scopenum_function) arg2scope[argname] = scopes[scopenum] callspec.funcargs.clear() # register artificial FixtureDef's so that later at test execution # time we can rely on a proper FixtureDef to exist for fixture setup. arg2fixturedefs = metafunc._arg2fixturedefs for argname, valuelist in arg2params.items(): # if we have a scope that is higher than function we need # to make sure we only ever create an according fixturedef on # a per-scope basis. We thus store and cache the fixturedef on the # node related to the scope. 
scope = arg2scope[argname] node = None if scope != "function": node = get_scope_node(collector, scope) if node is None: assert scope == "class" and isinstance(collector, pytest.Module) # use module-level collector for class-scope (for now) node = collector if node and argname in node._name2pseudofixturedef: arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] else: fixturedef = FixtureDef(fixturemanager, '', argname, get_direct_param_fixture_func, arg2scope[argname], valuelist, False, False) arg2fixturedefs[argname] = [fixturedef] if node is not None: node._name2pseudofixturedef[argname] = fixturedef def getfixturemarker(obj): """ return fixturemarker or None if it doesn't exist or raised exceptions.""" try: return getattr(obj, "_pytestfixturefunction", None) except Exception: # some objects raise errors like request (from flask import request) # we don't expect them to be fixture functions return None def get_parametrized_fixture_keys(item, scopenum): """ return list of keys for all parametrized arguments which match the specified scope. """ assert scopenum < scopenum_function # function try: cs = item.callspec except AttributeError: pass else: # cs.indictes.items() is random order of argnames but # then again different functions (items) can change order of # arguments so it doesn't matter much probably for argname, param_index in cs.indices.items(): if cs._arg2scopenum[argname] != scopenum: continue if scopenum == 0: # session key = (argname, param_index) elif scopenum == 1: # module key = (argname, param_index, item.fspath) elif scopenum == 2: # class key = (argname, param_index, item.fspath, item.cls) yield key # algorithm for sorting on a per-parametrized resource setup basis # it is called for scopenum==0 (session) first and performs sorting # down to the lower scopes such as to minimize number of "high scope" # setups and teardowns def reorder_items(items): argkeys_cache = {} for scopenum in range(0, scopenum_function): argkeys_cache[scopenum] = d = {} for item in items: keys = set(get_parametrized_fixture_keys(item, scopenum)) if keys: d[item] = keys return reorder_items_atscope(items, set(), argkeys_cache, 0) def reorder_items_atscope(items, ignore, argkeys_cache, scopenum): if scopenum >= scopenum_function or len(items) < 3: return items items_done = [] while 1: items_before, items_same, items_other, newignore = \ slice_items(items, ignore, argkeys_cache[scopenum]) items_before = reorder_items_atscope( items_before, ignore, argkeys_cache,scopenum+1) if items_same is None: # nothing to reorder in this scope assert items_other is None return items_done + items_before items_done.extend(items_before) items = items_same + items_other ignore = newignore def slice_items(items, ignore, scoped_argkeys_cache): # we pick the first item which uses a fixture instance in the # requested scope and which we haven't seen yet. We slice the input # items list into a list of items_nomatch, items_same and # items_other if scoped_argkeys_cache: # do we need to do work at all? 
it = iter(items) # first find a slicing key for i, item in enumerate(it): argkeys = scoped_argkeys_cache.get(item) if argkeys is not None: argkeys = argkeys.difference(ignore) if argkeys: # found a slicing key slicing_argkey = argkeys.pop() items_before = items[:i] items_same = [item] items_other = [] # now slice the remainder of the list for item in it: argkeys = scoped_argkeys_cache.get(item) if argkeys and slicing_argkey in argkeys and \ slicing_argkey not in ignore: items_same.append(item) else: items_other.append(item) newignore = ignore.copy() newignore.add(slicing_argkey) return (items_before, items_same, items_other, newignore) return items, None, None, None class FuncargnamesCompatAttr: """ helper class so that Metafunc, Function and FixtureRequest don't need to each define the "funcargnames" compatibility attribute. """ @property def funcargnames(self): """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" return self.fixturenames def fillfixtures(function): """ fill missing funcargs for a test function. """ try: request = function._request except AttributeError: # XXX this special code path is only expected to execute # with the oejskit plugin. It uses classes with funcargs # and we thus have to work a bit to allow this. fm = function.session._fixturemanager fi = fm.getfixtureinfo(function.parent, function.obj, None) function._fixtureinfo = fi request = function._request = FixtureRequest(function) request._fillfixtures() # prune out funcargs for jstests newfuncargs = {} for name in fi.argnames: newfuncargs[name] = function.funcargs[name] function.funcargs = newfuncargs else: request._fillfixtures() def get_direct_param_fixture_func(request): return request.param class FuncFixtureInfo: def __init__(self, argnames, names_closure, name2fixturedefs): self.argnames = argnames self.names_closure = names_closure self.name2fixturedefs = name2fixturedefs class FixtureRequest(FuncargnamesCompatAttr): """ A request for a fixture from a test or fixture function. A request object gives access to the requesting test context and has an optional ``param`` attribute in case the fixture is parametrized indirectly. 
""" def __init__(self, pyfuncitem): self._pyfuncitem = pyfuncitem #: fixture for which this request is being performed self.fixturename = None #: Scope string, one of "function", "class", "module", "session" self.scope = "function" self._fixture_values = {} # argname -> fixture value self._fixture_defs = {} # argname -> FixtureDef fixtureinfo = pyfuncitem._fixtureinfo self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() self._arg2index = {} self._fixturemanager = pyfuncitem.session._fixturemanager @property def fixturenames(self): # backward incompatible note: now a readonly property return list(self._pyfuncitem._fixtureinfo.names_closure) @property def node(self): """ underlying collection node (depends on current request scope)""" return self._getscopeitem(self.scope) def _getnextfixturedef(self, argname): fixturedefs = self._arg2fixturedefs.get(argname, None) if fixturedefs is None: # we arrive here because of a a dynamic call to # getfixturevalue(argname) usage which was naturally # not known at parsing/collection time parentid = self._pyfuncitem.parent.nodeid fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) self._arg2fixturedefs[argname] = fixturedefs # fixturedefs list is immutable so we maintain a decreasing index index = self._arg2index.get(argname, 0) - 1 if fixturedefs is None or (-index > len(fixturedefs)): raise FixtureLookupError(argname, self) self._arg2index[argname] = index return fixturedefs[index] @property def config(self): """ the pytest config object associated with this request. """ return self._pyfuncitem.config @scopeproperty() def function(self): """ test function object if the request has a per-function scope. """ return self._pyfuncitem.obj @scopeproperty("class") def cls(self): """ class (can be None) where the test function was collected. """ clscol = self._pyfuncitem.getparent(pytest.Class) if clscol: return clscol.obj @property def instance(self): """ instance (can be None) on which test function was collected. """ # unittest support hack, see _pytest.unittest.TestCaseFunction try: return self._pyfuncitem._testcase except AttributeError: function = getattr(self, "function", None) if function is not None: return py.builtin._getimself(function) @scopeproperty() def module(self): """ python module object where the test function was collected. """ return self._pyfuncitem.getparent(pytest.Module).obj @scopeproperty() def fspath(self): """ the file system path of the test module which collected this test. """ return self._pyfuncitem.fspath @property def keywords(self): """ keywords/markers dictionary for the underlying node. """ return self.node.keywords @property def session(self): """ pytest session object. """ return self._pyfuncitem.session def addfinalizer(self, finalizer): """ add finalizer/teardown function to be called after the last test within the requesting test context finished execution. """ # XXX usually this method is shadowed by fixturedef specific ones self._addfinalizer(finalizer, scope=self.scope) def _addfinalizer(self, finalizer, scope): colitem = self._getscopeitem(scope) self._pyfuncitem.session._setupstate.addfinalizer( finalizer=finalizer, colitem=colitem) def applymarker(self, marker): """ Apply a marker to a single test function invocation. This method is useful if you don't want to have a keyword/marker on all function invocations. :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object created by a call to ``pytest.mark.NAME(...)``. 
""" try: self.node.keywords[marker.markname] = marker except AttributeError: raise ValueError(marker) def raiseerror(self, msg): """ raise a FixtureLookupError with the given message. """ raise self._fixturemanager.FixtureLookupError(None, self, msg) def _fillfixtures(self): item = self._pyfuncitem fixturenames = getattr(item, "fixturenames", self.fixturenames) for argname in fixturenames: if argname not in item.funcargs: item.funcargs[argname] = self.getfixturevalue(argname) def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): """ (deprecated) Return a testing resource managed by ``setup`` & ``teardown`` calls. ``scope`` and ``extrakey`` determine when the ``teardown`` function will be called so that subsequent calls to ``setup`` would recreate the resource. With pytest-2.3 you often do not need ``cached_setup()`` as you can directly declare a scope on a fixture function and register a finalizer through ``request.addfinalizer()``. :arg teardown: function receiving a previously setup resource. :arg setup: a no-argument function creating a resource. :arg scope: a string value out of ``function``, ``class``, ``module`` or ``session`` indicating the caching lifecycle of the resource. :arg extrakey: added to internal caching key of (funcargname, scope). """ if not hasattr(self.config, '_setupcache'): self.config._setupcache = {} # XXX weakref? cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) cache = self.config._setupcache try: val = cache[cachekey] except KeyError: self._check_scope(self.fixturename, self.scope, scope) val = setup() cache[cachekey] = val if teardown is not None: def finalizer(): del cache[cachekey] teardown(val) self._addfinalizer(finalizer, scope=scope) return val def getfixturevalue(self, argname): """ Dynamically run a named fixture function. Declaring fixtures via function argument is recommended where possible. But if you can only decide whether to use another fixture at test setup time, you may use this function to retrieve it inside a fixture or test function body. """ return self._get_active_fixturedef(argname).cached_result[0] def getfuncargvalue(self, argname): """ Deprecated, use getfixturevalue. 
""" from _pytest import deprecated warnings.warn( deprecated.GETFUNCARGVALUE, DeprecationWarning) return self.getfixturevalue(argname) def _get_active_fixturedef(self, argname): try: return self._fixture_defs[argname] except KeyError: try: fixturedef = self._getnextfixturedef(argname) except FixtureLookupError: if argname == "request": class PseudoFixtureDef: cached_result = (self, [0], None) scope = "function" return PseudoFixtureDef raise # remove indent to prevent the python3 exception # from leaking into the call result = self._getfixturevalue(fixturedef) self._fixture_values[argname] = result self._fixture_defs[argname] = fixturedef return fixturedef def _get_fixturestack(self): current = self l = [] while 1: fixturedef = getattr(current, "_fixturedef", None) if fixturedef is None: l.reverse() return l l.append(fixturedef) current = current._parent_request def _getfixturevalue(self, fixturedef): # prepare a subrequest object before calling fixture function # (latter managed by fixturedef) argname = fixturedef.argname funcitem = self._pyfuncitem scope = fixturedef.scope try: param = funcitem.callspec.getparam(argname) except (AttributeError, ValueError): param = NOTSET param_index = 0 if fixturedef.params is not None: frame = inspect.stack()[3] frameinfo = inspect.getframeinfo(frame[0]) source_path = frameinfo.filename source_lineno = frameinfo.lineno source_path = py.path.local(source_path) if source_path.relto(funcitem.config.rootdir): source_path = source_path.relto(funcitem.config.rootdir) msg = ( "The requested fixture has no parameter defined for the " "current test.\n\nRequested fixture '{0}' defined in:\n{1}" "\n\nRequested here:\n{2}:{3}".format( fixturedef.argname, getlocation(fixturedef.func, funcitem.config.rootdir), source_path, source_lineno, ) ) pytest.fail(msg) else: # indices might not be set if old-style metafunc.addcall() was used param_index = funcitem.callspec.indices.get(argname, 0) # if a parametrize invocation set a scope it will override # the static scope defined with the fixture function paramscopenum = funcitem.callspec._arg2scopenum.get(argname) if paramscopenum is not None: scope = scopes[paramscopenum] subrequest = SubRequest(self, scope, param, param_index, fixturedef) # check if a higher-level scoped fixture accesses a lower level one subrequest._check_scope(argname, self.scope, scope) # clear sys.exc_info before invoking the fixture (python bug?) 
        # if it's not explicitly cleared it will leak into the call
        exc_clear()
        try:
            # call the fixture function
            val = fixturedef.execute(request=subrequest)
        finally:
            # if the fixture function failed it might have registered finalizers
            self.session._setupstate.addfinalizer(fixturedef.finish,
                                                  subrequest.node)
        return val

    def _check_scope(self, argname, invoking_scope, requested_scope):
        if argname == "request":
            return
        if scopemismatch(invoking_scope, requested_scope):
            # try to report something helpful
            lines = self._factorytraceback()
            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
                        "fixture %r with a %r scoped request object, "
                        "involved factories\n%s" % (
                            requested_scope, argname, invoking_scope,
                            "\n".join(lines)),
                        pytrace=False)

    def _factorytraceback(self):
        lines = []
        for fixturedef in self._get_fixturestack():
            factory = fixturedef.func
            fs, lineno = getfslineno(factory)
            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
            args = _format_args(factory)
            lines.append("%s:%d: def %s%s" % (
                p, lineno, factory.__name__, args))
        return lines

    def _getscopeitem(self, scope):
        if scope == "function":
            # this might also be a non-function Item despite its attribute name
            return self._pyfuncitem
        node = get_scope_node(self._pyfuncitem, scope)
        if node is None and scope == "class":
            # fallback to function item itself
            node = self._pyfuncitem
        assert node
        return node

    def __repr__(self):
        return "<FixtureRequest for %r>" % (self.node)


class SubRequest(FixtureRequest):
    """ a sub request for handling getting a fixture from a
    test function/fixture. """

    def __init__(self, request, scope, param, param_index, fixturedef):
        self._parent_request = request
        self.fixturename = fixturedef.argname
        if param is not NOTSET:
            self.param = param
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        self.addfinalizer = fixturedef.addfinalizer
        self._pyfuncitem = request._pyfuncitem
        self._fixture_values = request._fixture_values
        self._fixture_defs = request._fixture_defs
        self._arg2fixturedefs = request._arg2fixturedefs
        self._arg2index = request._arg2index
        self._fixturemanager = request._fixturemanager

    def __repr__(self):
        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)


class ScopeMismatchError(Exception):
    """ A fixture function tries to use a different fixture function which
    has a lower scope (e.g. a Session one calls a function one)
    """


scopes = "session module class function".split()
scopenum_function = scopes.index("function")


def scopemismatch(currentscope, newscope):
    return scopes.index(newscope) > scopes.index(currentscope)


def scope2index(scope, descr, where=None):
    """Look up the index of ``scope`` and raise a descriptive value error
    if not defined.
    """
    try:
        return scopes.index(scope)
    except ValueError:
        raise ValueError(
            "{0} {1}has an unsupported scope value '{2}'".format(
                descr, 'from {0} '.format(where) if where else '',
                scope)
        )


class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid).
""" def __init__(self, argname, request, msg=None): self.argname = argname self.request = request self.fixturestack = request._get_fixturestack() self.msg = msg def formatrepr(self): tblines = [] addline = tblines.append stack = [self.request._pyfuncitem.obj] stack.extend(map(lambda x: x.func, self.fixturestack)) msg = self.msg if msg is not None: # the last fixture raise an error, let's present # it at the requesting side stack = stack[:-1] for function in stack: fspath, lineno = getfslineno(function) try: lines, _ = inspect.getsourcelines(get_real_func(function)) except (IOError, IndexError, TypeError): error_msg = "file %s, line %s: source code not available" addline(error_msg % (fspath, lineno+1)) else: addline("file %s, line %s" % (fspath, lineno+1)) for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) if line.lstrip().startswith('def'): break if msg is None: fm = self.request._fixturemanager available = [] parentid = self.request._pyfuncitem.parent.nodeid for name, fixturedefs in fm._arg2fixturedefs.items(): faclist = list(fm._matchfactories(fixturedefs, parentid)) if faclist and name not in available: available.append(name) msg = "fixture %r not found" % (self.argname,) msg += "\n available fixtures: %s" %(", ".join(sorted(available)),) msg += "\n use 'pytest --fixtures [testpath]' for help on them." return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) class FixtureLookupErrorRepr(TerminalRepr): def __init__(self, filename, firstlineno, tblines, errorstring, argname): self.tblines = tblines self.errorstring = errorstring self.filename = filename self.firstlineno = firstlineno self.argname = argname def toterminal(self, tw): # tw.line("FixtureLookupError: %s" %(self.argname), red=True) for tbline in self.tblines: tw.line(tbline.rstrip()) lines = self.errorstring.split("\n") if lines: tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker, lines[0].strip()), red=True) for line in lines[1:]: tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker, line.strip()), red=True) tw.line() tw.line("%s:%d" % (self.filename, self.firstlineno+1)) def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) location = "%s:%s" % (fs, lineno+1) source = _pytest._code.Source(fixturefunc) pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) def call_fixture_func(fixturefunc, request, kwargs): yieldctx = is_generator(fixturefunc) if yieldctx: it = fixturefunc(**kwargs) res = next(it) def teardown(): try: next(it) except StopIteration: pass else: fail_fixturefunc(fixturefunc, "yield_fixture function has more than one 'yield'") request.addfinalizer(teardown) else: res = fixturefunc(**kwargs) return res class FixtureDef: """ A container for a factory definition. 
""" def __init__(self, fixturemanager, baseid, argname, func, scope, params, unittest=False, ids=None): self._fixturemanager = fixturemanager self.baseid = baseid or '' self.has_location = baseid is not None self.func = func self.argname = argname self.scope = scope self.scopenum = scope2index( scope or "function", descr='fixture {0}'.format(func.__name__), where=baseid ) self.params = params startindex = unittest and 1 or None self.argnames = getfuncargnames(func, startindex=startindex) self.unittest = unittest self.ids = ids self._finalizer = [] def addfinalizer(self, finalizer): self._finalizer.append(finalizer) def finish(self): try: while self._finalizer: func = self._finalizer.pop() func() finally: ihook = self._fixturemanager.session.ihook ihook.pytest_fixture_post_finalizer(fixturedef=self) # even if finalization fails, we invalidate # the cached fixture value if hasattr(self, "cached_result"): del self.cached_result def execute(self, request): # get required arguments and register our own finish() # with their finalization for argname in self.argnames: fixturedef = request._get_active_fixturedef(argname) if argname != "request": fixturedef.addfinalizer(self.finish) my_cache_key = request.param_index cached_result = getattr(self, "cached_result", None) if cached_result is not None: result, cache_key, err = cached_result if my_cache_key == cache_key: if err is not None: py.builtin._reraise(*err) else: return result # we have a previous but differently parametrized fixture instance # so we need to tear it down before creating a new one self.finish() assert not hasattr(self, "cached_result") ihook = self._fixturemanager.session.ihook return ihook.pytest_fixture_setup(fixturedef=self, request=request) def __repr__(self): return ("<FixtureDef name=%r scope=%r baseid=%r >" % (self.argname, self.scope, self.baseid)) def pytest_fixture_setup(fixturedef, request): """ Execution of fixture setup. """ kwargs = {} for argname in fixturedef.argnames: fixdef = request._get_active_fixturedef(argname) result, arg_cache_key, exc = fixdef.cached_result request._check_scope(argname, request.scope, fixdef.scope) kwargs[argname] = result fixturefunc = fixturedef.func if fixturedef.unittest: if request.instance is not None: # bind the unbound method to the TestCase instance fixturefunc = fixturedef.func.__get__(request.instance) else: # the fixture function needs to be bound to the actual # request.instance so that code working with "fixturedef" behaves # as expected. if request.instance is not None: fixturefunc = getimfunc(fixturedef.func) if fixturefunc != fixturedef.func: fixturefunc = fixturefunc.__get__(request.instance) my_cache_key = request.param_index try: result = call_fixture_func(fixturefunc, request, kwargs) except Exception: fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) raise fixturedef.cached_result = (result, my_cache_key, None) return result class FixtureFunctionMarker: def __init__(self, scope, params, autouse=False, ids=None, name=None): self.scope = scope self.params = params self.autouse = autouse self.ids = ids self.name = name def __call__(self, function): if isclass(function): raise ValueError( "class fixtures not supported (may be in the future)") function._pytestfixturefunction = self return function def fixture(scope="function", params=None, autouse=False, ids=None, name=None): """ (return a) decorator to mark a fixture factory function. This decorator can be used (with or without parameters) to define a fixture function. 
    The name of the fixture function can later be referenced to cause its
    invocation ahead of running tests: test
    modules or classes can use the pytest.mark.usefixtures(fixturename)
    marker.  Test functions can directly use fixture names as input
    arguments in which case the fixture instance returned from the fixture
    function will be injected.

    :arg scope: the scope for which this fixture is shared, one of
                "function" (default), "class", "module" or "session".

    :arg params: an optional list of parameters which will cause multiple
                invocations of the fixture function and all of the tests
                using it.

    :arg autouse: if True, the fixture func is activated for all tests that
                can see it.  If False (the default) then an explicit
                reference is needed to activate the fixture.

    :arg ids: list of string ids each corresponding to the params
                so that they are part of the test id. If no ids are provided
                they will be generated automatically from the params.

    :arg name: the name of the fixture. This defaults to the name of the
                decorated function. If a fixture is used in the same module in
                which it is defined, the function name of the fixture will be
                shadowed by the function arg that requests the fixture; one way
                to resolve this is to name the decorated function
                ``fixture_<fixturename>`` and then use
                ``@pytest.fixture(name='<fixturename>')``.

    Fixtures can optionally provide their values to test functions using a
    ``yield`` statement, instead of ``return``. In this case, the code block
    after the ``yield`` statement is executed as teardown code regardless of
    the test outcome. A fixture function must yield exactly once.
    """
    if callable(scope) and params is None and not autouse:
        # direct decoration
        return FixtureFunctionMarker(
            "function", params, autouse, name=name)(scope)
    if params is not None and not isinstance(params, (list, tuple)):
        params = list(params)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)


def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
    """ (return a) decorator to mark a yield-fixture factory function.

    .. deprecated:: 3.0
        Use :py:func:`pytest.fixture` directly instead.
    """
    if callable(scope) and params is None and not autouse:
        # direct decoration
        return FixtureFunctionMarker(
            "function", params, autouse, ids=ids, name=name)(scope)
    else:
        return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)


defaultfuncargprefixmarker = fixture()


@fixture(scope="session")
def pytestconfig(request):
    """ the pytest config object with access to command line opts."""
    return request.config


class FixtureManager:
    """
    pytest fixture definitions and information are stored and managed
    by this class.

    During collection fm.parsefactories() is called multiple times to parse
    fixture function definitions into FixtureDef objects and internal
    data structures.

    During collection of test functions, metafunc-mechanics instantiate
    a FuncFixtureInfo object which is cached per node/func-name.
    This FuncFixtureInfo object is later retrieved by Function nodes
    which themselves offer a fixturenames attribute.

    The FuncFixtureInfo object holds information about fixtures and
    FixtureDefs relevant for a particular function.
    An initial list of fixtures is assembled like this:

    - ini-defined usefixtures
    - autouse-marked fixtures along the collection chain up from the function
    - usefixtures markers at module/class/function level
    - test function funcargs

    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to setup the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.

    Upon the test-setup phases all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
    """

    _argprefix = "pytest_funcarg__"
    FixtureLookupError = FixtureLookupError
    FixtureLookupErrorRepr = FixtureLookupErrorRepr

    def __init__(self, session):
        self.session = session
        self.config = session.config
        self._arg2fixturedefs = {}
        self._holderobjseen = set()
        self._arg2finish = {}
        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
        session.config.pluginmanager.register(self, "funcmanage")

    def getfixtureinfo(self, node, func, cls, funcargs=True):
        if funcargs and not hasattr(node, "nofuncargs"):
            if cls is not None:
                startindex = 1
            else:
                startindex = None
            argnames = getfuncargnames(func, startindex)
        else:
            argnames = ()
        usefixtures = getattr(func, "usefixtures", None)
        initialnames = argnames
        if usefixtures is not None:
            initialnames = usefixtures.args + initialnames
        fm = node.session._fixturemanager
        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, node)
        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)

    def pytest_plugin_registered(self, plugin):
        nodeid = None
        try:
            p = py.path.local(plugin.__file__)
        except AttributeError:
            pass
        else:
            # construct the base nodeid which is later used to check
            # what fixtures are visible for particular tests (as denoted
            # by their test id)
            if p.basename.startswith("conftest.py"):
                nodeid = p.dirpath().relto(self.config.rootdir)
                if p.sep != "/":
                    nodeid = nodeid.replace(p.sep, "/")
        self.parsefactories(plugin, nodeid)

    def _getautousenames(self, nodeid):
        """ return a list of fixture names to be used. """
        autousenames = []
        for baseid, basenames in self._nodeid_and_autousenames:
            if nodeid.startswith(baseid):
                if baseid:
                    i = len(baseid)
                    nextchar = nodeid[i:i + 1]
                    if nextchar and nextchar not in ":/":
                        continue
                autousenames.extend(basenames)
        # make sure autousenames are sorted by scope, scopenum 0 is session
        autousenames.sort(
            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
        return autousenames

    def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs again for each fixturename
        # (discovering matching fixtures for a given name/node is expensive)

        parentid = parentnode.nodeid
        fixturenames_closure = self._getautousenames(parentid)

        def merge(otherlist):
            for arg in otherlist:
                if arg not in fixturenames_closure:
                    fixturenames_closure.append(arg)

        merge(fixturenames)
        arg2fixturedefs = {}
        lastlen = -1
        while lastlen != len(fixturenames_closure):
            lastlen = len(fixturenames_closure)
            for argname in fixturenames_closure:
                if argname in arg2fixturedefs:
                    continue
                fixturedefs = self.getfixturedefs(argname, parentid)
                if fixturedefs:
                    arg2fixturedefs[argname] = fixturedefs
                    merge(fixturedefs[-1].argnames)
        return fixturenames_closure, arg2fixturedefs

    def pytest_generate_tests(self, metafunc):
        for argname in metafunc.fixturenames:
            faclist = metafunc._arg2fixturedefs.get(argname)
            if faclist:
                fixturedef = faclist[-1]
                if fixturedef.params is not None:
                    func_params = getattr(getattr(metafunc.function, 'parametrize', None),
                                          'args', [[None]])
                    # skip directly parametrized arguments
                    argnames = func_params[0]
                    if not isinstance(argnames, (tuple, list)):
                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
                    if argname not in func_params and argname not in argnames:
                        metafunc.parametrize(argname, fixturedef.params,
                                             indirect=True, scope=fixturedef.scope,
                                             ids=fixturedef.ids)
            else:
                continue  # will raise FixtureLookupError at setup time

    def pytest_collection_modifyitems(self, items):
        # separate parametrized setups
        items[:] = reorder_items(items)

    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return
        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            # The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise.  safe_getattr() ignores such exceptions.
            obj = safe_getattr(holderobj, name, None)
            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
            # or are "@pytest.fixture" marked
            marker = getfixturemarker(obj)
            if marker is None:
                if not name.startswith(self._argprefix):
                    continue
                if not callable(obj):
                    continue
                marker = defaultfuncargprefixmarker
                from _pytest import deprecated
                self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name))
                name = name[len(self._argprefix):]
            elif not isinstance(marker, FixtureFunctionMarker):
                # magic globals with __getattr__ might have got us a wrong
                # fixture attribute
                continue
            else:
                if marker.name:
                    name = marker.name
                msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
                      'and be decorated with @pytest.fixture:\n%s' % name
                assert not name.startswith(self._argprefix), msg

            fixture_def = FixtureDef(self, nodeid, name, obj,
                                     marker.scope, marker.params,
                                     unittest=unittest, ids=marker.ids)

            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixture_def.has_location:
                faclist.append(fixture_def)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location]) faclist.insert(i, fixture_def) if marker.autouse: autousenames.append(name) if autousenames: self._nodeid_and_autousenames.append((nodeid or '', autousenames)) def getfixturedefs(self, argname, nodeid): """ Gets a list of fixtures which are applicable to the given node id. :param str argname: name of the fixture to search for :param str nodeid: full node id of the requesting test. :return: list[FixtureDef] """ try: fixturedefs = self._arg2fixturedefs[argname] except KeyError: return None else: return tuple(self._matchfactories(fixturedefs, nodeid)) def _matchfactories(self, fixturedefs, nodeid): for fixturedef in fixturedefs: if nodeid.startswith(fixturedef.baseid): yield fixturedef
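# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how the
# machinery above fits together from a test author's point of view.  The
# names "connect", "session_db" and "db" are hypothetical.
#
#     import pytest
#
#     @pytest.fixture(scope="session")
#     def session_db():
#         db = connect()      # hypothetical helper
#         yield db            # code after the yield runs as teardown
#         db.close()
#
#     @pytest.fixture
#     def db(session_db):
#         # requesting "session_db" puts it into the names closure computed
#         # by FixtureManager.getfixtureclosure(); FixtureDef.execute() then
#         # caches the session-scoped value across tests.
#         return session_db
# ---------------------------------------------------------------------------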
jaraco/pytest
_pytest/fixtures.py
Python
mit
44,995
[ "VisIt" ]
a672cb3edf1ced2d5edb42fad7931b0bbebed855048f11870f2bbb07409c17e6
'''
*** SHED SKIN Python-to-C++ Compiler ***
Copyright 2005-2013 Mark Dufour; License GNU GPL version 3 (See LICENSE)

infer.py: perform iterative type analysis

we combine two techniques from the literature, to analyze both parametric
polymorphism and data polymorphism adaptively. these techniques are Agesen's
cartesian product algorithm and Plevyak's iterative flow analysis (the data
polymorphic part). for details about these algorithms, see Ole Agesen's
excellent PhD thesis. for details about the Shed Skin implementation, see
Mark Dufour's MSc thesis.

the cartesian product algorithm duplicates functions (or their graph
counterpart), based on the cartesian product of possible argument types,
whereas iterative flow analysis duplicates classes based on observed
imprecisions at assignment points. the two integers mentioned in the
graph.py description are used to keep track of duplicates along these
dimensions (first class duplicate nr, then function duplicate nr).

the combined technique scales reasonably well, but can explode in many
cases. there are many ways to improve this. some ideas:

-an iterative deepening approach, merging redundant duplicates after each
 deepening
-add and propagate filters across variables. e.g. 'a+1; a=b' implies that a
 and b must be of a type that implements '__add__'.

a complementary but very practical approach to (greatly) improve scalability
would be to profile programs before compiling them, resulting in quite
precise (lower bound) type information. type inference can then be used to
'fill in the gaps'.

iterative_dataflow_analysis():
    (FORWARD PHASE)
    -propagate types along constraint graph (propagate())
    -all the while creating function duplicates using the cartesian product
     algorithm (cpa())
    -when creating a function duplicate, fill in allocation points with
     correct type (ifa_seed_template())
    (BACKWARD PHASE)
    -determine classes to be duplicated, according to found imprecision
     points (ifa())
    -from imprecision points, follow the constraint graph (backwards) to
     find involved allocation points
    -duplicate classes, and spread them over these allocation points
    (CLEANUP)
    -quit if no further imprecision points (ifa() did not find anything)
    -otherwise, restore the constraint graph to its original state and
     restart
    -all the while maintaining types for each allocation point in
     gx.alloc_info

update: we now analyze programs incrementally, adding several functions and
redoing the full analysis each time. this seems to greatly help keep the CPA
from exploding early on.
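as a small illustration of the data polymorphic part, consider:

    a = []          # allocation site 1
    b = []          # allocation site 2
    a.append(1)
    b.append('1')

with a single contour both allocation sites would be imprecisely typed as
list(int or str); ifa detects the conflicting assignments to the list 'unit'
variable and splits the contour, giving list(int) for site 1 and list(str)
for site 2. (illustrative example; see the theses above for the precise
formulation.)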
'''
import random
import sys

from compiler.ast import Const, Node, AssAttr, Keyword, CallFunc, Getattr, Dict, List, Tuple, ListComp, Not, Compare, Name

import error
import graph
from python import StaticClass, lookup_class_module, Function, \
    Variable, lookup_var, Class, lookup_implementor, def_class
from typestr import nodetypestr
from virtual import analyze_virtuals

INCREMENTAL = True
INCREMENTAL_FUNCS = 5
INCREMENTAL_DATA = True
INCREMENTAL_ALLOCS = 20
MAXITERS = 30
CPA_LIMIT = 10


class CNode:
    # note: __slots__ lists every attribute assigned in __init__ ('gx' and
    # 'lambdawrapper' included); as an old-style class CNode ignores
    # __slots__, but keeping the list complete avoids surprises if it is
    # ever made new-style
    __slots__ = ['gx', 'thing', 'dcpa', 'cpa', 'fakefunc', 'parent', 'defnodes',
                 'mv', 'constructor', 'copymetoo', 'fakert', 'lambdawrapper',
                 'in_', 'out', 'fout', 'in_list', 'callfuncs', 'nodecp']

    def __init__(self, gx, thing, dcpa=0, cpa=0, parent=None, mv=None):
        self.gx = gx
        self.thing = thing
        self.dcpa = dcpa
        self.cpa = cpa
        self.fakefunc = None
        if isinstance(parent, Class):  # XXX
            parent = None
        self.parent = parent
        self.defnodes = False  # if callnode, notification nodes were made for default arguments
        self.mv = mv
        self.constructor = False  # allocation site
        self.copymetoo = False
        self.fakert = False
        self.lambdawrapper = None

        self.gx.cnode[self.thing, self.dcpa, self.cpa] = self

        # --- in, outgoing constraints

        self.in_ = set()  # incoming nodes
        self.out = set()  # outgoing nodes
        self.fout = set()  # unreal outgoing edges, used in ifa

        # --- iterative dataflow analysis

        self.in_list = 0  # node in work-list
        self.callfuncs = []  # callfuncs to which node is object/argument

        self.nodecp = set()  # already analyzed cp's  # XXX kill!?

        # --- add node to surrounding non-listcomp function
        if parent:  # do this only once! (not when copying)
            while parent and isinstance(parent, Function) and parent.listcomp:
                parent = parent.parent
            if isinstance(parent, Function):
                if self not in parent.nodes:
                    parent.nodes.add(self)
                    parent.nodes_ordered.append(self)

    def copy(self, dcpa, cpa, worklist=None):  # XXX to infer.py
        # if not self.mv.module.builtin: print 'copy', self

        if (self.thing, dcpa, cpa) in self.gx.cnode:
            return self.gx.cnode[self.thing, dcpa, cpa]

        newnode = CNode(self.gx, self.thing, dcpa, cpa, mv=self.mv)

        newnode.callfuncs = self.callfuncs[:]  # XXX no copy?
newnode.constructor = self.constructor newnode.copymetoo = self.copymetoo newnode.parent = self.parent add_to_worklist(worklist, newnode) if self.constructor or self.copymetoo or isinstance(self.thing, (Not, Compare)): # XXX XXX self.gx.types[newnode] = self.gx.types[self].copy() else: self.gx.types[newnode] = set() return newnode def types(self): if self in self.gx.types: return self.gx.types[self] else: return set() # XXX def __repr__(self): return repr((self.thing, self.dcpa, self.cpa)) def DEBUG(gx, level): return gx.debug_level >= level def nrargs(gx, node): if inode(gx, node).lambdawrapper: return inode(gx, node).lambdawrapper.largs return len(node.args) def called(func): return bool([cpas for cpas in func.cp.values() if cpas]) def get_types(gx, expr, node, merge): types = set() if merge: if expr.node in merge: types = merge[expr.node] elif node: node = (expr.node, node.dcpa, node.cpa) if node in gx.cnode: types = gx.cnode[node].types() return types def is_anon_callable(gx, expr, node, merge=None): types = get_types(gx, expr, node, merge) anon = bool([t for t in types if isinstance(t[0], Function)]) call = bool([t for t in types if isinstance(t[0], Class) and '__call__' in t[0].funcs]) return anon, call def parent_func(gx, thing): parent = inode(gx, thing).parent while parent: if not isinstance(parent, Function) or not parent.listcomp: if not isinstance(parent, StaticClass): return parent parent = parent.parent def analyze_args(gx, expr, func, node=None, skip_defaults=False, merge=None): objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analyze_callfunc(gx, expr, node, merge) args = [] kwdict = {} for a in expr.args: if isinstance(a, Keyword): kwdict[a.name] = a.expr else: args.append(a) formal_args = func.formals[:] if func.node.varargs: formal_args = formal_args[:-1] default_start = len(formal_args) - len(func.defaults) if ident in ['__getattr__', '__setattr__']: # property? 
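        # args[0] is the attribute-name constant (cf. connect_getsetattr
        # below), not a real actual argument, so it is dropped here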
args = args[1:] if (method_call or constructor) and not (parent_constr or anon_func): # XXX args = [None] + args argnr = 0 actuals, formals, defaults = [], [], [] missing = False for i, formal in enumerate(formal_args): if formal in kwdict: actuals.append(kwdict[formal]) formals.append(formal) elif formal.startswith('__kw_') and formal[5:] in kwdict: actuals.insert(0, kwdict[formal[5:]]) formals.insert(0, formal) elif argnr < len(args) and not formal.startswith('__kw_'): actuals.append(args[argnr]) argnr += 1 formals.append(formal) elif i >= default_start: if not skip_defaults: default = func.defaults[i - default_start] if formal.startswith('__kw_'): actuals.insert(0, default) formals.insert(0, formal) else: actuals.append(default) formals.append(formal) defaults.append(default) else: missing = True extra = args[argnr:] _error = (missing or extra) and not func.node.varargs and not func.node.kwargs and not expr.star_args and func.lambdanr is None and expr not in gx.lambdawrapper # XXX if func.node.varargs: for arg in extra: actuals.append(arg) formals.append(func.formals[-1]) return actuals, formals, defaults, extra, _error def connect_actual_formal(gx, expr, func, parent_constr=False, merge=None): pairs = [] actuals = [a for a in expr.args if not isinstance(a, Keyword)] if isinstance(func.parent, Class): formals = [f for f in func.formals if f != 'self'] else: formals = [f for f in func.formals] if parent_constr: actuals = actuals[1:] skip_defaults = False # XXX investigate and further narrow down cases where we want to skip if (func.mv.module.ident in ['time', 'string', 'collections', 'bisect', 'array', 'math', 'cStringIO', 'getopt']) or \ (func.mv.module.ident == 'random' and func.ident == 'randrange') or\ (func.mv.module.ident == 'builtin' and func.ident not in ('sort', 'sorted', 'min', 'max', '__print')): skip_defaults = True actuals, formals, _, extra, _error = analyze_args(gx, expr, func, skip_defaults=skip_defaults, merge=merge) for (actual, formal) in zip(actuals, formals): if not (isinstance(func.parent, Class) and formal == 'self'): pairs.append((actual, func.vars[formal])) return pairs, len(extra), _error # --- return list of potential call targets def callfunc_targets(gx, node, merge): objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analyze_callfunc(gx, node, merge=merge) funcs = [] if node.node in merge and [t for t in merge[node.node] if isinstance(t[0], Function)]: # anonymous function call funcs = [t[0] for t in merge[node.node] if isinstance(t[0], Function)] elif constructor: if ident in ('list', 'tuple', 'set', 'frozenset') and nrargs(gx, node) == 1: funcs = [constructor.funcs['__inititer__']] elif (ident, nrargs(gx, node)) in (('dict', 1), ('defaultdict', 2)): # XXX merge infer.redirect funcs = [constructor.funcs['__initdict__']] # XXX __inititer__? elif sys.platform == 'win32' and '__win32__init__' in constructor.funcs: funcs = [constructor.funcs['__win32__init__']] elif '__init__' in constructor.funcs: funcs = [constructor.funcs['__init__']] elif parent_constr: if ident != '__init__': cl = inode(gx, node).parent.parent funcs = [cl.funcs[ident]] elif direct_call: funcs = [direct_call] elif method_call: classes = set(t[0] for t in merge[objexpr] if isinstance(t[0], Class)) funcs = [cl.funcs[ident] for cl in classes if ident in cl.funcs] return funcs # --- analyze call expression: namespace, method call, direct call/constructor.. 
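# the tuple returned below is (objexpr, ident, direct_call, method_call,
# constructor, parent_constr, anon_func): the receiver expression (if any),
# the called name, the target Function for a plain call, a method-call flag,
# the Class being instantiated (if any), a parent-constructor-call flag and
# an anonymous-call flag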
def analyze_callfunc(gx, node, node2=None, merge=None):  # XXX generate target list XXX uniform Variable system! XXX node2, merge?
    # print 'analyze callnode', node, inode(gx, node).parent
    cnode = inode(gx, node)
    mv = cnode.mv
    namespace, objexpr, method_call, parent_constr = mv.module, None, False, False
    constructor, direct_call, ident = None, None, None

    # anon func call XXX refactor as __call__ method call below
    anon_func, is_callable = is_anon_callable(gx, node, node2, merge)
    if is_callable:
        method_call, objexpr, ident = True, node.node, '__call__'
        return objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func

    # method call
    if isinstance(node.node, Getattr):
        objexpr, ident = node.node.expr, node.node.attrname
        cl, module = lookup_class_module(objexpr, mv, cnode.parent)

        if cl:
            # staticmethod call
            if ident in cl.staticmethods:
                direct_call = cl.funcs[ident]
                return objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func

            # ancestor call
            elif ident not in ['__setattr__', '__getattr__'] and cnode.parent:
                thiscl = cnode.parent.parent
                if isinstance(thiscl, Class) and cl.ident in (x.ident for x in thiscl.ancestors_upto(None)):  # XXX
                    if lookup_implementor(cl, ident):
                        parent_constr = True
                        ident = ident + lookup_implementor(cl, ident) + '__'  # XXX change data structure
                        return objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func

        if module:  # XXX elif?
            namespace, objexpr = module, None
        else:
            method_call = True

    elif isinstance(node.node, Name):
        ident = node.node.name

    # direct [constructor] call
    if isinstance(node.node, Name) or namespace != mv.module:
        if isinstance(node.node, Name):
            if lookup_var(ident, cnode.parent, mv=mv):
                return objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func
        if ident in namespace.mv.classes:
            constructor = namespace.mv.classes[ident]
        elif ident in namespace.mv.funcs:
            direct_call = namespace.mv.funcs[ident]
        elif ident in namespace.mv.ext_classes:
            constructor = namespace.mv.ext_classes[ident]
        elif ident in namespace.mv.ext_funcs:
            direct_call = namespace.mv.ext_funcs[ident]
        else:
            if namespace != mv.module:
                return objexpr, ident, None, False, None, False, False

    return objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func


# --- merge constraint network along combination of given dimensions (dcpa, cpa, inheritance)
# e.g. for annotation we merge everything; for code generation, we might want to create specialized code
def merged(gx, nodes, inheritance=False):
    merge = {}
    if inheritance:  # XXX do we really need this crap
        mergeinh = merged(gx, [n for n in nodes if n.thing in gx.inherited])
        mergenoinh = merged(gx, [n for n in nodes if n.thing not in gx.inherited])

    for node in nodes:
        # --- merge node types
        sortdefault = merge.setdefault(node.thing, set())
        sortdefault.update(gx.types[node])

        # --- merge inheritance nodes
        if inheritance:
            inh = gx.inheritance_relations.get(node.thing, [])

            # merge function variables with their inherited versions (we don't customize!)
            if isinstance(node.thing, Variable) and isinstance(node.thing.parent, Function):
                var = node.thing
                for inhfunc in gx.inheritance_relations.get(var.parent, []):
                    if var.name in inhfunc.vars:
                        if inhfunc.vars[var.name] in mergenoinh:
                            sortdefault.update(mergenoinh[inhfunc.vars[var.name]])
                for inhvar in gx.inheritance_temp_vars.get(var, []):  # XXX more general
                    if inhvar in mergenoinh:
                        sortdefault.update(mergenoinh[inhvar])

            # node is not a function variable
            else:
                for n in inh:
                    if n in mergeinh:  # XXX also mergenoinh?
sortdefault.update(mergeinh[n]) return merge def inode(gx, node): return gx.cnode[node, 0, 0] def add_constraint(gx, a, b, worklist=None): gx.constraints.add((a, b)) in_out(a, b) add_to_worklist(worklist, a) def in_out(a, b): a.out.add(b) b.in_.add(a) def add_to_worklist(worklist, node): # XXX to infer.py if worklist is not None and not node.in_list: worklist.append(node) node.in_list = 1 def class_copy(gx, cl, dcpa): for var in cl.vars.values(): # XXX if not inode(gx, var) in gx.types: continue # XXX research later inode(gx, var).copy(dcpa, 0) gx.types[gx.cnode[var, dcpa, 0]] = inode(gx, var).types().copy() for n in inode(gx, var).in_: # XXX if isinstance(n.thing, Const): add_constraint(gx, n, gx.cnode[var, dcpa, 0]) for func in cl.funcs.values(): if cl.mv.module.ident == 'builtin' and cl.ident != '__iter' and func.ident == '__iter__': # XXX hack for __iter__:__iter() itercl = def_class(gx, '__iter') gx.alloc_info[func.ident, ((cl, dcpa),), func.returnexpr[0]] = (itercl, itercl.dcpa) class_copy(gx, itercl, dcpa) itercl.dcpa += 1 func_copy(gx, func, dcpa, 0) # --- use dcpa=0,cpa=0 mold created by module visitor to duplicate function def func_copy(gx, func, dcpa, cpa, worklist=None, cart=None): # print 'funccopy', func, cart, dcpa, cpa # --- copy local end points of each constraint for (a, b) in func.constraints: if not (isinstance(a.thing, Variable) and parent_func(gx, a.thing) != func) and a.dcpa == 0: a = a.copy(dcpa, cpa, worklist) if not (isinstance(b.thing, Variable) and parent_func(gx, b.thing) != func) and b.dcpa == 0: b = b.copy(dcpa, cpa, worklist) add_constraint(gx, a, b, worklist) # --- copy other nodes for node in func.nodes: node.copy(dcpa, cpa, worklist) # --- iterative flow analysis: seed allocation sites in new template ifa_seed_template(gx, func, cart, dcpa, cpa, worklist) def print_typeset(types): l = list(types.items()) l.sort(lambda x, y: cmp(repr(x[0]), repr(y[0]))) for uh in l: if not uh[0].mv.module.builtin: print repr(uh[0]) + ':', uh[1] # , uh[0].parent print def print_state(gx): # print 'state:' print_typeset(gx.types) def print_constraints(gx): # print 'constraints:' l = list(gx.constraints) l.sort(lambda x, y: cmp(repr(x[0]), repr(y[0]))) for (a, b) in l: if not (a.mv.module.builtin and b.mv.module.builtin): print a, '->', b if not a in gx.types or not b in gx.types: print 'NOTYPE', a in gx.types, b in gx.types print # --- iterative dataflow analysis def propagate(gx): if DEBUG(gx, 1): print 'propagate' # --- initialize working sets worklist = [] changed = set() for node in gx.types: if gx.types[node]: add_to_worklist(worklist, node) expr = node.thing if (isinstance(expr, CallFunc) and not expr.args) or expr in gx.lambdawrapper: # XXX changed.add(node) for node in changed: cpa(gx, node, worklist) builtins = set(gx.builtins) types = gx.types # --- iterative dataflow analysis while worklist: callnodes = set() while worklist: a = worklist.pop(0) a.in_list = 0 for callfunc in a.callfuncs: t = (callfunc, a.dcpa, a.cpa) if t in gx.cnode: callnodes.add(gx.cnode[t]) for b in a.out.copy(): # XXX can change...? 
# for builtin types, the set of instance variables is known, so do not flow into non-existent ones # XXX ifa if isinstance(b.thing, Variable) and isinstance(b.thing.parent, Class): parent_ident = b.thing.parent.ident if parent_ident in builtins: if parent_ident in ['int_', 'float_', 'str_', 'none', 'bool_']: continue elif parent_ident in ['list', 'tuple', 'frozenset', 'set', 'file', '__iter', 'deque', 'array'] and b.thing.name != 'unit': continue elif parent_ident in ('dict', 'defaultdict') and b.thing.name not in ['unit', 'value']: continue elif parent_ident == 'tuple2' and b.thing.name not in ['unit', 'first', 'second']: continue typesa = types[a] typesb = types[b] oldsize = len(typesb) if b.thing in gx.filters: typesa = set([t for t in typesa if t[0] == gx.filters[b.thing]]) typesb.update(typesa) if len(typesb) > oldsize: add_to_worklist(worklist, b) for callnode in callnodes: cpa(gx, callnode, worklist) # --- determine cartesian product of possible function and argument types def possible_functions(gx, node, analysis): expr = node.thing # --- determine possible target functions objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analysis funcs = [] if anon_func: # anonymous call types = gx.cnode[expr.node, node.dcpa, node.cpa].types() types = [t for t in types if isinstance(t[0], Function)] # XXX XXX analyse per t, sometimes class, sometimes function.. if list(types)[0][0].parent: # method reference XXX merge below? funcs = [(f[0], f[1], (f[0].parent, f[1])) for f in types] # node.dcpa: connect to right dcpa duplicate version else: # function reference funcs = [(f[0], f[1], None) for f in types] # function call: only one version; no objtype elif constructor: funcs = [(t[0].funcs['__init__'], t[1], t) for t in node.types() if '__init__' in t[0].funcs] elif parent_constr: objtypes = gx.cnode[lookup_var('self', node.parent, mv=node.mv), node.dcpa, node.cpa].types() funcs = [(t[0].funcs[ident], t[1], None) for t in objtypes if ident in t[0].funcs] elif direct_call: funcs = [(direct_call, 0, None)] elif method_call: objtypes = gx.cnode[objexpr, node.dcpa, node.cpa].types() objtypes = [t for t in objtypes if not isinstance(t[0], Function)] # XXX funcs = [(t[0].funcs[ident], t[1], t) for t in objtypes if ident in t[0].funcs and not (isinstance(t[0], Class) and ident in t[0].staticmethods)] return funcs def possible_argtypes(gx, node, funcs, analysis, worklist): expr = node.thing objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analysis if funcs: func = funcs[0][0] # XXX args = [] if expr.star_args: # XXX args = [expr.star_args] elif funcs and not func.node: # XXX getattr, setattr args = expr.args elif funcs: actuals, formals, used_defaults, varargs, _ = analyze_args(gx, expr, func, node) if not node.defnodes: for i, default in enumerate(used_defaults): defnode = CNode(gx, (inode(gx, node.thing), i), node.dcpa, node.cpa, parent=func, mv=node.mv) gx.types[defnode] = set() defnode.callfuncs.append(node.thing) add_constraint(gx, gx.cnode[default, 0, 0], defnode, worklist) # XXX bad place node.defnodes = True for act, form in zip(actuals, formals): if parent_constr or not (isinstance(func.parent, Class) and form == 'self'): # XXX merge args.append(act) argtypes = [] for arg in args: if (arg, node.dcpa, node.cpa) in gx.cnode: argtypes.append(gx.cnode[arg, node.dcpa, node.cpa].types()) else: argtypes.append(inode(gx, arg).types()) # XXX def arg? 
# store arg count for wrappers to builtin refs if funcs and (func.lambdawrapper or node.thing in gx.lambdawrapper): while argtypes and not argtypes[-1]: argtypes = argtypes[:-1] if func.lambdawrapper: if expr.star_args and node.parent and node.parent.node.varargs: func.largs = node.parent.xargs[node.dcpa, node.cpa] - len(node.parent.formals) + 1 else: func.largs = len(argtypes) return argtypes def product(*lists): if not lists: return [()] result = [] prod = product(*lists[:-1]) for x in prod: for y in lists[-1]: result.append(x + (y,)) return result def cartesian_product(gx, node, analysis, worklist): funcs = possible_functions(gx, node, analysis) if not funcs: return [] argtypes = possible_argtypes(gx, node, funcs, analysis, worklist) alltypes = [funcs] + argtypes return product(*alltypes) def redirect(gx, c, dcpa, func, callfunc, ident, callnode, direct_call, constructor): # redirect based on number of arguments (__%s%d syntax in builtins) if func.mv.module.builtin: if isinstance(func.parent, Class): funcs = func.parent.funcs else: funcs = func.mv.funcs redir = '__%s%d' % (func.ident, len([kwarg for kwarg in callfunc.args if not isinstance(kwarg, Keyword)])) func = funcs.get(redir, func) # filter if direct_call and ident == 'filter': clnames = [x[0].ident for x in c if isinstance(x[0], Class)] if 'str_' in clnames or 'tuple' in clnames or 'tuple2' in clnames: func = func.mv.funcs['__' + ident] # staticmethod if isinstance(func.parent, Class) and func.ident in func.parent.staticmethods: dcpa = 1 # dict.__init__ if constructor and (ident, nrargs(gx, callfunc)) in (('dict', 1), ('defaultdict', 2)): clnames = [x[0].ident for x in c if isinstance(x[0], Class)] if 'dict' in clnames or 'defaultdict' in clnames: func = list(callnode.types())[0][0].funcs['__initdict__'] else: func = list(callnode.types())[0][0].funcs['__inititer__'] # dict.update if func.ident == 'update' and isinstance(func.parent, Class) and func.parent.ident in ('dict', 'defaultdict'): clnames = [x[0].ident for x in c if isinstance(x[0], Class)] if not ('dict' in clnames or 'defaultdict' in clnames): func = func.parent.funcs['updateiter'] # list, tuple if constructor and ident in ('list', 'tuple', 'set', 'frozenset') and nrargs(gx, callfunc) == 1: func = list(callnode.types())[0][0].funcs['__inititer__'] # XXX use __init__? 
# array if constructor and ident == 'array' and isinstance(callfunc.args[0], Const): typecode = callfunc.args[0].value array_type = None if typecode in 'bBhHiIlL': array_type = 'int' elif typecode == 'c': array_type = 'str' elif typecode in 'fd': array_type = 'float' if array_type is not None: func = list(callnode.types())[0][0].funcs['__init_%s__' % array_type] # tuple2.__getitem__(0/1) -> __getfirst__/__getsecond__ if (isinstance(callfunc.node, Getattr) and callfunc.node.attrname == '__getitem__' and isinstance(callfunc.args[0], Const) and callfunc.args[0].value in (0, 1) and func.parent.mv.module.builtin and func.parent.ident == 'tuple2'): if callfunc.args[0].value == 0: func = func.parent.funcs['__getfirst__'] else: func = func.parent.funcs['__getsecond__'] # property if isinstance(callfunc.node, Getattr) and callfunc.node.attrname in ['__setattr__', '__getattr__']: if isinstance(func.parent, Class) and callfunc.args and callfunc.args[0].value in func.parent.properties: arg = callfunc.args[0].value if callfunc.node.attrname == '__setattr__': func = func.parent.funcs[func.parent.properties[arg][1]] else: func = func.parent.funcs[func.parent.properties[arg][0]] c = c[1:] # win32 if sys.platform == 'win32' and func.mv.module.builtin and isinstance(func.parent, Class) and '__win32' + func.ident in func.parent.funcs: func = func.parent.funcs['__win32' + func.ident] return c, dcpa, func # --- cartesian product algorithm; adds interprocedural constraints def cpa(gx, callnode, worklist): analysis = analyze_callfunc(gx, callnode.thing, callnode) cp = cartesian_product(gx, callnode, analysis, worklist) if not cp: return if len(cp) > gx.cpa_limit and not gx.cpa_clean: gx.cpa_limited = True return [] objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analysis # --- iterate over argument type combinations for c in cp: (func, dcpa, objtype), c = c[0], c[1:] if INCREMENTAL: if not func.mv.module.builtin and func not in gx.added_funcs_set and not func.ident in ['__getattr__', '__setattr__']: if INCREMENTAL_DATA: if gx.added_allocs >= INCREMENTAL_ALLOCS: continue else: if gx.added_funcs >= INCREMENTAL_FUNCS: continue gx.added_funcs += 1 gx.added_funcs_set.add(func) if DEBUG(gx, 1): print 'adding', func if objtype: objtype = (objtype,) else: objtype = () # redirect in special cases callfunc = callnode.thing c, dcpa, func = redirect(gx, c, dcpa, func, callfunc, ident, callnode, direct_call, constructor) # already connected to template if (func,) + objtype + c in callnode.nodecp: continue callnode.nodecp.add((func,) + objtype + c) # create new template if not dcpa in func.cp or not c in func.cp[dcpa]: create_template(gx, func, dcpa, c, worklist) cpa = func.cp[dcpa][c] func.xargs[dcpa, cpa] = len(c) # __getattr__, __setattr__ if connect_getsetattr(gx, func, callnode, callfunc, dcpa, worklist): continue # connect actuals and formals actuals_formals(gx, callfunc, func, callnode, dcpa, cpa, objtype + c, analysis, worklist) # connect call and return expressions if func.retnode and not constructor: retnode = gx.cnode[func.retnode.thing, dcpa, cpa] add_constraint(gx, retnode, callnode, worklist) def connect_getsetattr(gx, func, callnode, callfunc, dcpa, worklist): if (isinstance(callfunc.node, Getattr) and callfunc.node.attrname in ['__setattr__', '__getattr__'] and not (isinstance(func.parent, Class) and callfunc.args and callfunc.args[0].value in func.parent.properties)): varname = callfunc.args[0].value parent = func.parent var = default_var(gx, varname, parent, worklist, 
mv=parent.module.mv) # XXX always make new var?? inode(gx, var).copy(dcpa, 0, worklist) if not gx.cnode[var, dcpa, 0] in gx.types: gx.types[gx.cnode[var, dcpa, 0]] = set() gx.cnode[var, dcpa, 0].mv = parent.module.mv # XXX move into default_var if callfunc.node.attrname == '__setattr__': add_constraint(gx, gx.cnode[callfunc.args[1], callnode.dcpa, callnode.cpa], gx.cnode[var, dcpa, 0], worklist) else: add_constraint(gx, gx.cnode[var, dcpa, 0], callnode, worklist) return True return False def create_template(gx, func, dcpa, c, worklist): # --- unseen cartesian product: create new template if not dcpa in func.cp: func.cp[dcpa] = {} func.cp[dcpa][c] = cpa = len(func.cp[dcpa]) # XXX +1 if DEBUG(gx, 2) and not func.mv.module.builtin and not func.ident in ['__getattr__', '__setattr__']: print 'template', (func, dcpa), c gx.templates += 1 func_copy(gx, func, dcpa, cpa, worklist, c) def actuals_formals(gx, expr, func, node, dcpa, cpa, types, analysis, worklist): objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analysis if expr.star_args: # XXX only in lib/ formals = func.formals actuals = len(formals) * [expr.star_args] types = len(formals) * types else: actuals, formals, _, varargs, _error = analyze_args(gx, expr, func, node) if _error: return for (actual, formal, formaltype) in zip(actuals, formals, types): formalnode = gx.cnode[func.vars[formal], dcpa, cpa] if formaltype[1] != 0: # ifa: remember dataflow information for non-simple types if actual is None: if constructor: objexpr = node.thing if method_call or constructor: formalnode.in_.add(gx.cnode[objexpr, node.dcpa, node.cpa]) else: if actual in func.defaults: formalnode.in_.add(gx.cnode[actual, 0, 0]) else: formalnode.in_.add(gx.cnode[actual, node.dcpa, node.cpa]) gx.types[formalnode].add(formaltype) add_to_worklist(worklist, formalnode) # --- iterative flow analysis: after each iteration, detect imprecisions, and split involved contours def ifa(gx): if DEBUG(gx, 1): print 'ifa' split = [] # [(set of creation nodes, new type number), ..] 
allcsites = {} for n, types in gx.types.iteritems(): if not n.in_: for (cl, dcpa) in types: allcsites.setdefault((cl, dcpa), set()).add(n) for cl in ifa_classes_to_split(gx): if DEBUG(gx, 3): print 'IFA: --- class %s ---' % cl.ident cl.newdcpa = cl.dcpa vars = [cl.vars[name] for name in cl.tvar_names() if name in cl.vars] classes_nr, nr_classes = ifa_class_types(gx, cl, vars) for dcpa in range(1, cl.dcpa): if ifa_split_vars(gx, cl, dcpa, vars, nr_classes, classes_nr, split, allcsites) is not None: if DEBUG(gx, 3): print 'IFA found splits, return' return split if DEBUG(gx, 3): print 'IFA final return' return split def ifa_split_vars(gx, cl, dcpa, vars, nr_classes, classes_nr, split, allcsites): for (varnum, var) in enumerate(vars): if not (var, dcpa, 0) in gx.cnode: continue node = gx.cnode[var, dcpa, 0] creation_points, paths, assignsets, allnodes, csites, emptycsites = ifa_flow_graph(gx, cl, dcpa, node, allcsites) if DEBUG(gx, 3): print 'IFA visit var %s.%s, %d, csites %d' % (cl.ident, var.name, dcpa, len(csites)) if len(csites) + len(emptycsites) == 1: continue if ((len(merge_simple_types(gx, gx.types[node])) > 1 and len(assignsets) > 1) or (assignsets and emptycsites)): # XXX move to split_no_conf ifa_split_no_confusion(gx, cl, dcpa, varnum, classes_nr, nr_classes, csites, emptycsites, allnodes, split) if split: break for node in allnodes: if not ifa_confluence_point(node, creation_points): continue if not node.thing.formal_arg and not isinstance(node.thing.parent, Class): continue remaining = ifa_determine_split(node, allnodes) if len(remaining) < 2 or len(remaining) >= 10: continue # --- if it exists, perform actual splitting if DEBUG(gx, 3): print 'IFA normal split, remaining:', len(remaining) for splitsites in remaining[1:]: ifa_split_class(cl, dcpa, splitsites, split) return split # --- try to partition csites across paths prt = {} for c in csites: ts = set() for p in c.paths: ts.update(p) ts = frozenset(ts) if ts not in prt: prt[ts] = [] prt[ts].append(c) if len(prt) > 1: if DEBUG(gx, 3): print 'IFA partition csites:', prt.values()[0] ifa_split_class(cl, dcpa, prt.values()[0], split) # --- if all else fails, perform wholesale splitting elif len(paths) > 1 and 1 < len(csites) < 10: if DEBUG(gx, 3): print 'IFA wholesale splitting, csites:', len(csites) for csite in csites[1:]: ifa_split_class(cl, dcpa, [csite], split) return split def ifa_split_no_confusion(gx, cl, dcpa, varnum, classes_nr, nr_classes, csites, emptycsites, allnodes, split): '''creation sites on single path: split them off, possibly reusing contour''' attr_types = list(nr_classes[dcpa]) noconf = set([n for n in csites if len(n.paths) == 1] + emptycsites) others = len(csites) + len(emptycsites) - len(noconf) subtype_csites = {} for node in noconf: if node.paths: assign_set = node.paths[0] else: assign_set = frozenset() if attr_types[varnum] == assign_set: others += 1 else: subtype = attr_types[:] subtype[varnum] = assign_set subtype = tuple(subtype) try: subtype_csites[subtype].append(node) except KeyError: subtype_csites[subtype] = [node] items = subtype_csites.items() if not others: items = items[1:] for subtype, csites in subtype_csites.iteritems(): # XXX items? 
if subtype in classes_nr: # reuse contour nr = classes_nr[subtype] split.append((cl, dcpa, csites, nr)) cl.splits[nr] = dcpa else: # create new contour classes_nr[subtype] = cl.newdcpa ifa_split_class(cl, dcpa, csites, split) if DEBUG(gx, 3) and subtype_csites: print 'IFA found simple split', subtype_csites.keys() def ifa_class_types(gx, cl, vars): ''' create table for previously deduced types ''' classes_nr, nr_classes = {}, {} for dcpa in range(1, cl.dcpa): attr_types = [] # XXX merge with ifa_merge_contours.. sep func? for var in vars: if (var, dcpa, 0) in gx.cnode: attr_types.append(merge_simple_types(gx, gx.cnode[var, dcpa, 0].types())) else: attr_types.append(frozenset()) attr_types = tuple(attr_types) if DEBUG(gx, 3) and [x for x in attr_types if x]: print 'IFA', str(dcpa) + ':', zip([var.name for var in vars], map(list, attr_types)) nr_classes[dcpa] = attr_types classes_nr[attr_types] = dcpa return classes_nr, nr_classes def ifa_determine_split(node, allnodes): ''' determine split along incoming dataflow edges ''' remaining = [incoming.csites.copy() for incoming in node.in_ if incoming in allnodes] # --- try to clean out larger collections, if subsets are in smaller ones for (i, seti) in enumerate(remaining): for setj in remaining[i + 1:]: in_both = seti.intersection(setj) if in_both: if len(seti) > len(setj): seti -= in_both else: setj -= in_both remaining = [setx for setx in remaining if setx] return remaining def ifa_classes_to_split(gx): ''' setup classes to perform splitting on ''' classes = [] for ident in ['list', 'tuple', 'tuple2', 'dict', 'set', 'frozenset', 'deque', 'defaultdict', '__iter', 'array']: for cl in gx.allclasses: if cl.mv.module.builtin and cl.ident == ident: cl.splits = {} classes.append(cl) break random.shuffle(classes) return classes def ifa_confluence_point(node, creation_points): ''' determine if node is confluence point ''' if len(node.in_) > 1 and isinstance(node.thing, Variable): for csite in node.csites: occ = [csite in crpoints for crpoints in creation_points.values()].count(True) if occ > 1: return True return False def ifa_flow_graph(gx, cl, dcpa, node, allcsites): creation_points, paths, assignsets = {}, {}, {} allnodes = set() csites = [] # --- determine assignment sets for a in node.in_: types = gx.types[a] if types: if a.thing in gx.assign_target: # XXX *args target = gx.cnode[gx.assign_target[a.thing], a.dcpa, a.cpa] # print 'target', a, target, types assignsets.setdefault(merge_simple_types(gx, types), []).append(target) # --- determine backflow paths and creation points per assignment set for assign_set, targets in assignsets.iteritems(): path = backflow_path(gx, targets, (cl, dcpa)) paths[assign_set] = path allnodes.update(path) alloc = [n for n in path if not n.in_] creation_points[assign_set] = alloc # --- per node, determine paths it is located on for n in allnodes: n.paths = [] for assign_set, path in paths.iteritems(): for n in path: n.paths.append(assign_set) # --- for each node, determine creation points that 'flow' through it for n in allnodes: n.csites = set() if not n.in_: n.csites.add(n) csites.append(n) flow_creation_sites(csites, allnodes) # csites not flowing to any assignment allcsites2 = allcsites.get((cl, dcpa), set()) emptycsites = list(allcsites2 - set(csites)) for n in emptycsites: n.paths = [] return creation_points, paths, assignsets, allnodes, csites, emptycsites def ifa_split_class(cl, dcpa, things, split): split.append((cl, dcpa, things, cl.newdcpa)) cl.splits[cl.newdcpa] = dcpa cl.newdcpa += 1 def 
update_progressbar(gx, perc): if not gx.silent: print '\r%s%d%%' % (int(perc * 32) * '*', 100 * perc), if DEBUG(gx, 1): print else: sys.stdout.flush() # --- cartesian product algorithm (cpa) & iterative flow analysis (ifa) def iterative_dataflow_analysis(gx): if not gx.silent: print '[analyzing types..]' backup = backup_network(gx) gx.orig_types = {} for n, t in gx.types.iteritems(): gx.orig_types[n] = t if INCREMENTAL: update_progressbar(gx, 0) gx.added_funcs = INCREMENTAL_FUNCS # analyze root of callgraph in first round gx.added_funcs_set = set() gx.added_allocs = 0 gx.added_allocs_set = set() gx.cpa_limit = CPA_LIMIT gx.cpa_clean = False while True: gx.iterations += 1 gx.total_iterations += 1 maxiter = (gx.iterations == MAXITERS) if DEBUG(gx, 1): print '\n*** iteration %d ***' % gx.iterations # --- propagate using cartesian product algorithm gx.new_alloc_info = {} # print 'table' # print '\n'.join([repr(e)+': '+repr(l) for e,l in gx.alloc_info.items()]) gx.cpa_limited = False propagate(gx) gx.alloc_info = gx.new_alloc_info if gx.cpa_limited: if DEBUG(gx, 1): print 'CPA limit %d reached!' % gx.cpa_limit else: gx.cpa_clean = True # --- ifa: detect conflicting assignments to instance variables, and split contours to resolve these split = ifa(gx) if split: if DEBUG(gx, 1): print '%d splits' % len(split) elif DEBUG(gx, 3): print 'IFA splits', [(s[0], s[1], s[3]) for s in split] if not split or maxiter: if DEBUG(gx, 1) and not maxiter: print 'no splits' if INCREMENTAL: allfuncs = len([f for f in gx.allfuncs if not f.mv.module.builtin and not [start for start in ('__iadd__', '__imul__', '__str__', '__hash__') if f.ident.startswith(start)]]) perc = 1.0 if allfuncs: perc = min(len(gx.added_funcs_set) / float(allfuncs), 1.0) update_progressbar(gx, perc) if maxiter: print '\n*WARNING* reached maximum number of iterations' gx.maxhits += 1 if gx.maxhits == 3: return gx.cpa_clean = False if INCREMENTAL and (gx.added_funcs or gx.added_allocs): gx.added_funcs = 0 gx.added_allocs = 0 gx.iterations = 0 elif gx.cpa_limited: gx.cpa_limit *= 2 gx.iterations = 0 else: if INCREMENTAL: update_progressbar(gx, 1.0) if DEBUG(gx, 1): print '\niterations:', gx.total_iterations, 'templates:', gx.templates elif not gx.silent: print return if not INCREMENTAL and not DEBUG(gx, 1): sys.stdout.write('*') sys.stdout.flush() # --- update alloc info table for split contours for cl, dcpa, nodes, newnr in split: for n in nodes: parent = parent_func(gx, n.thing) if parent: if n.dcpa in parent.cp: for cart, cpa in parent.cp[n.dcpa].items(): # XXX not very fast if cpa == n.cpa: if parent.parent and isinstance(parent.parent, Class): # self cart = ((parent.parent, n.dcpa),) + cart gx.alloc_info[parent.ident, cart, n.thing] = (cl, newnr) break beforetypes = backup[0] # --- clean out constructor node types in functions, possibly to be seeded again for node in beforetypes: func = parent_func(gx, node.thing) if isinstance(func, Function): if node.constructor and isinstance(node.thing, (List, Dict, Tuple, ListComp, CallFunc)): beforetypes[node] = set() # --- create new class types, and seed global nodes for cl, dcpa, nodes, newnr in split: if newnr == cl.dcpa: class_copy(gx, cl, newnr) cl.dcpa += 1 # print 'split off', nodes, newnr for n in nodes: if not parent_func(gx, n.thing): beforetypes[n] = set([(cl, newnr)]) # --- restore network restore_network(gx, backup) # --- seed allocation sites in newly created templates (called by function.copy()) def ifa_seed_template(gx, func, cart, dcpa, cpa, worklist): if cart is not None: # (None 
means we are not in the process of propagation) # print 'funccopy', func.ident #, func.nodes if isinstance(func.parent, Class): # self cart = ((func.parent, dcpa),) + cart added = gx.added_allocs_set added_new = 0 for node in func.nodes_ordered: if node.constructor and isinstance(node.thing, (List, Dict, Tuple, ListComp, CallFunc)): if node.thing not in added: if INCREMENTAL_DATA and not func.mv.module.builtin: if gx.added_allocs >= INCREMENTAL_ALLOCS: continue added_new += 1 gx.added_allocs += 1 added.add(node.thing) # --- contour is specified in alloc_info parent = node.parent while isinstance(parent.parent, Function): parent = parent.parent alloc_id = (parent.ident, cart, node.thing) # XXX ident? alloc_node = gx.cnode[node.thing, dcpa, cpa] if alloc_id in gx.alloc_info: pass # print 'specified' # print 'specified', func.ident, cart, alloc_node, alloc_node.callfuncs, gx.alloc_info[alloc_id] # --- contour is newly split: copy allocation type for 'mother' contour; modify alloc_info else: mother_alloc_id = alloc_id for (id, c, thing) in gx.alloc_info: if id == parent.ident and thing is node.thing: for a, b in zip(cart, c): if a != b and not (isinstance(a[0], Class) and a[0] is b[0] and a[1] in a[0].splits and a[0].splits[a[1]] == b[1]): break else: mother_alloc_id = (id, c, thing) break # print 'not specified.. mother id:', mother_alloc_id if mother_alloc_id in gx.alloc_info: gx.alloc_info[alloc_id] = gx.alloc_info[mother_alloc_id] # print 'mothered', alloc_node, gx.alloc_info[mother_alloc_id] elif gx.orig_types[node]: # empty constructors that do not flow to assignments have no type # print 'no mother', func.ident, cart, mother_alloc_id, alloc_node, gx.types[node] gx.alloc_info[alloc_id] = list(gx.orig_types[node])[0] else: # print 'oh boy' for (id, c, thing) in gx.alloc_info: # XXX vhy? if id == parent.ident and thing is node.thing: mother_alloc_id = (id, c, thing) gx.alloc_info[alloc_id] = gx.alloc_info[mother_alloc_id] break if alloc_id in gx.alloc_info: gx.new_alloc_info[alloc_id] = gx.alloc_info[alloc_id] gx.types[alloc_node] = set() # print 'seeding..', alloc_node, gx.alloc_info[alloc_id], alloc_node.thing in gx.empty_constructors gx.types[alloc_node].add(gx.alloc_info[alloc_id]) add_to_worklist(worklist, alloc_node) if DEBUG(gx, 1) and added_new and not func.mv.module.builtin: print '%d seed(s)' % added_new, func # --- for a set of target nodes of a specific type of assignment (e.g. 
int to (list,7)), flow back to creation points def backflow_path(gx, worklist, t): path = set(worklist) while worklist: new = set() for node in worklist: for incoming in node.in_: if t in gx.types[incoming]: incoming.fout.add(node) if not incoming in path: path.add(incoming) new.add(incoming) worklist = new return path def flow_creation_sites(worklist, allnodes): while worklist: new = set() for node in worklist: for out in node.fout: if out in allnodes: oldsize = len(out.csites) out.csites.update(node.csites) if len(out.csites) > oldsize: new.add(out) worklist = new # --- backup constraint network def backup_network(gx): beforetypes = {} for node, typeset in gx.types.items(): beforetypes[node] = typeset.copy() beforeconstr = gx.constraints.copy() beforeinout = {} for node in gx.types: beforeinout[node] = (node.in_.copy(), node.out.copy()) beforecnode = gx.cnode.copy() return (beforetypes, beforeconstr, beforeinout, beforecnode) # --- restore constraint network, introducing new types def restore_network(gx, backup): beforetypes, beforeconstr, beforeinout, beforecnode = backup gx.types = {} for node, typeset in beforetypes.items(): gx.types[node] = typeset.copy() gx.constraints = beforeconstr.copy() gx.cnode = beforecnode.copy() for node, typeset in gx.types.items(): node.nodecp = set() node.defnodes = False befinout = beforeinout[node] node.in_, node.out = befinout[0].copy(), befinout[1].copy() node.fout = set() # XXX ? for var in gx.allvars: # XXX we have to restore some variable constraint nodes.. remove vars? if not (var, 0, 0) in gx.cnode: CNode(gx, var, parent=var.parent) for func in gx.allfuncs: func.cp = {} def merge_simple_types(gx, types): merge = types.copy() if len(types) > 1 and (def_class(gx, 'none'), 0) in types: if not (def_class(gx, 'int_'), 0) in types and not (def_class(gx, 'float_'), 0) in types and not (def_class(gx, 'bool_'), 0) in types: merge.remove((def_class(gx, 'none'), 0)) return frozenset(merge) def get_classes(gx, var): return set(t[0] for t in gx.merged_inh[var] if isinstance(t[0], Class) and not t[0].mv.module.builtin) def deepcopy_classes(gx, classes): changed = True while changed: changed = False for cl in classes.copy(): for var in cl.vars.values(): if var not in gx.merged_inh: continue newcl = get_classes(gx, var) if newcl - classes: changed = True classes.update(newcl) return classes def determine_classes(gx): # XXX modeling..? if 'copy' not in gx.modules: return func = gx.modules['copy'].mv.funcs['copy'] var = func.vars[func.formals[0]] for cl in get_classes(gx, var): cl.has_copy = True func = gx.modules['copy'].mv.funcs['deepcopy'] var = func.vars[func.formals[0]] for cl in deepcopy_classes(gx, get_classes(gx, var)): cl.has_deepcopy = True def analyze(gx, module_name): # --- build dataflow graph from source code gx.main_module = graph.parse_module(module_name, gx) # --- seed class_.__name__ attributes.. 
for cl in gx.allclasses: if cl.ident == 'class_': var = default_var(gx, '__name__', cl) gx.types[inode(gx, var)] = set([(def_class(gx, 'str_'), 0)]) # --- non-ifa: copy classes for each allocation site for cl in gx.allclasses: if cl.ident in ['int_', 'float_', 'none', 'class_', 'str_', 'bool_']: continue if cl.ident == 'list': cl.dcpa = len(gx.list_types) + 2 elif cl.ident != '__iter': # XXX huh cl.dcpa = 2 for dcpa in range(1, cl.dcpa): class_copy(gx, cl, dcpa) var = default_var(gx, 'unit', def_class(gx, 'str_')) gx.types[inode(gx, var)] = set([(def_class(gx, 'str_'), 0)]) # --- cartesian product algorithm & iterative flow analysis iterative_dataflow_analysis(gx) if not gx.silent: print '[generating c++ code..]' for cl in gx.allclasses: for name in cl.vars: if name in cl.parent.vars and not name.startswith('__'): error.error("instance variable '%s' of class '%s' shadows class variable" % (name, cl.ident), gx, warning=True) gx.merged_inh = merged(gx, gx.types, inheritance=True) analyze_virtuals(gx) determine_classes(gx) # --- add inheritance relationships for non-original Nodes (and temp_vars?); XXX register more, right solution? for func in gx.allfuncs: if func in gx.inheritance_relations: for inhfunc in gx.inheritance_relations[func]: for a, b in zip(func.registered, inhfunc.registered): graph.inherit_rec(gx, a, b, func.mv) for a, b in zip(func.registered_temp_vars, inhfunc.registered_temp_vars): # XXX more general gx.inheritance_temp_vars.setdefault(a, []).append(b) gx.merged_inh = merged(gx, gx.types, inheritance=True) # error for dynamic expression without explicit type declaration for node in gx.merged_inh: if isinstance(node, Node) and not isinstance(node, AssAttr) and not inode(gx, node).mv.module.builtin: nodetypestr(gx, node, inode(gx, node).parent, mv=inode(gx, node).mv) return gx def register_temp_var(var, parent): if isinstance(parent, Function): parent.registered_temp_vars.append(var) def default_var(gx, name, parent, worklist=None, mv=None): if parent: mv = parent.mv var = lookup_var(name, parent, local=True, mv=mv) if not var: var = Variable(name, parent) if parent: # XXX move to Variable? parent.vars[name] = var else: mv.globals[name] = var gx.allvars.add(var) if (var, 0, 0) not in gx.cnode: newnode = CNode(gx, var, parent=parent, mv=mv) if parent: newnode.mv = parent.mv else: newnode.mv = mv add_to_worklist(worklist, newnode) gx.types[newnode] = set() if isinstance(parent, Function) and parent.listcomp and not var.registered: while isinstance(parent, Function) and parent.listcomp: # XXX parent = parent.parent register_temp_var(var, parent) return var def var_types(gx, var): return inode(gx, var).types()
kirbyfan64/shedskin
shedskin/infer.py
Python
gpl-3.0
57,578
[ "VisIt" ]
a61d71aec5e5a140bfe2b8fbee6d446d3828f4ee8cf8ec008cacebe2237bd47f
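A minimal toy sketch (not shedskin's API; the node names, dicts, and the type tag 'T' are invented for illustration) of the backward traversal that `backflow_path` in infer.py above performs: starting from assignment targets, follow incoming dataflow edges that carry the contour type until the allocation sites, i.e. nodes with no incoming edges, are reached.

def toy_backflow(targets, in_edges, types, t):
    # Walk dataflow edges backwards from the targets, keeping only
    # predecessors whose inferred type set contains t.
    path = set(targets)
    worklist = set(targets)
    while worklist:
        new = set()
        for node in worklist:
            for incoming in in_edges.get(node, ()):
                if t in types[incoming] and incoming not in path:
                    path.add(incoming)
                    new.add(incoming)
        worklist = new
    return path

# Allocation site 'c' flows through 'b' into the assignment target 'a'.
in_edges = {'a': ['b'], 'b': ['c'], 'c': []}
types = {'a': {'T'}, 'b': {'T'}, 'c': {'T'}}
print(toy_backflow(['a'], in_edges, types, 'T'))  # {'a', 'b', 'c'}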
# beamsearch.py - breadth-first search with limited queueing # # Copyright 2016-2019 NetworkX developers. # # This file is part of NetworkX. # # NetworkX is distributed under a BSD license; see LICENSE.txt for more # information. """Basic algorithms for breadth-first searching the nodes of a graph.""" import networkx as nx from .breadth_first_search import generic_bfs_edges __all__ = ['bfs_beam_edges'] def bfs_beam_edges(G, source, value, width=None): """Iterates over edges in a beam search. The beam search is a generalized breadth-first search in which only the "best" *w* neighbors of the current node are enqueued, where *w* is the beam width and "best" is an application-specific heuristic. In general, a beam search with a small beam width might not visit each node in the graph. Parameters ---------- G : NetworkX graph source : node Starting node for the breadth-first search; this function iterates over only those edges in the component reachable from this node. value : function A function that takes a node of the graph as input and returns a real number indicating how "good" it is. A higher value means it is more likely to be visited sooner during the search. When visiting a new node, only the `width` neighbors with the highest `value` are enqueued (in decreasing order of `value`). width : int (default = None) The beam width for the search. This is the number of neighbors (ordered by `value`) to enqueue when visiting each new node. Yields ------ edge Edges in the beam search starting from `source`, given as a pair of nodes. Examples -------- To give nodes with, for example, a higher centrality precedence during the search, set the `value` function to return the centrality value of the node:: >>> G = nx.karate_club_graph() >>> centrality = nx.eigenvector_centrality(G) >>> source = 0 >>> width = 5 >>> for u, v in nx.bfs_beam_edges(G, source, centrality.get, width): ... print((u, v)) # doctest: +SKIP """ if width is None: width = len(G) def successors(v): """Returns a list of the best neighbors of a node. `v` is a node in the graph `G`. The "best" neighbors are chosen according to the `value` function (higher is better). Only the `width` best neighbors of `v` are returned. The list returned by this function is in decreasing value as measured by the `value` function. """ # TODO The Python documentation states that for small values, it # is better to use `heapq.nlargest`. We should determine the # threshold at which its better to use `heapq.nlargest()` # instead of `sorted()[:]` and apply that optimization here. # # If `width` is greater than the number of neighbors of `v`, all # neighbors are returned by the semantics of slicing in # Python. This occurs in the special case that the user did not # specify a `width`: in this case all neighbors are always # returned, so this is just a (slower) implementation of # `bfs_edges(G, source)` but with a sorted enqueue step. return iter(sorted(G.neighbors(v), key=value, reverse=True)[:width]) # TODO In Python 3.3+, this should be `yield from ...` for e in generic_bfs_edges(G, source, successors): yield e
sserrot/champion_relationships
venv/Lib/site-packages/networkx/algorithms/traversal/beamsearch.py
Python
mit
3,543
[ "VisIt" ]
de65be27b2c301a35cdfe328441c9d8e44ec4da1df5532bedb774062678da410
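The docstring's karate-club example above is runnable as-is; a self-contained script version (requires networkx; `bfs_beam_edges`, `karate_club_graph`, and `eigenvector_centrality` are all part of the library shown):

import networkx as nx

G = nx.karate_club_graph()
centrality = nx.eigenvector_centrality(G)
# At each visited node, enqueue at most the 5 highest-centrality neighbors.
for u, v in nx.bfs_beam_edges(G, source=0, value=centrality.get, width=5):
    print((u, v))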
""" Unit Tests for preprocess module. """ import sys import math import unittest import argparse import numpy import pysam from mixemt import phylotree from mixemt import preprocess # TODO: Stuff to test: # def process_reads(samfile, var_pos, min_mq, min_bq): # def build_em_input(samfile, refseq, phylo, args): class TestHapVarBaseMatrix(unittest.TestCase): def setUp(self): phy_in = ['I, A1G ,,', ',H, A3T A5T ,,', ',,F, A6T ,,', ',,,B, A8T ,,', ',,,C, T5A ,,', ',,G, A7T ,,', ',,,D, A9T ,,', ',,,E, A4T ,,', ',A, A2T A4T ,,'] self.phy = phylotree.Phylotree(phy_in) self.ref = "AAAAAAAAA" def test_hapvarbasematrix_init(self): mut_wt, mut_max = 0.02, 0.6 hvb = preprocess.HapVarBaseMatrix(self.ref, self.phy, mut_wt, mut_max) self.assertEqual(hvb.refseq, self.ref) self.assertEqual(hvb.phylo, self.phy) self.assertEqual(hvb.mut_wt, mut_wt) self.assertEqual(hvb.mut_max, mut_max) markers = {'A':{1:'T', 3:'T', 0:'G'}, 'B':{0:'G', 2:'T', 4:'T', 5:'T', 7:'T'}, 'C':{0:'G', 2:'T', 5:'T'}, 'D':{0:'G', 2:'T', 4:'T', 6:'T', 8:'T'}, 'E':{0:'G', 2:'T', 3:'T', 4:'T', 6:'T'}, 'F':{0:'G', 2:'T', 4:'T', 5:'T'}, 'G':{0:'G', 2:'T', 4:'T', 6:'T'}, 'H':{0:'G', 2:'T', 4:'T'}, 'I':{0:'G'}} self.assertEqual(hvb.markers, markers) def test_probs(self): mut_wt, mut_max = 0.10, 0.10 hvb = preprocess.HapVarBaseMatrix(self.ref, self.phy, mut_wt, mut_max) # hits self.assertEqual(hvb._prob(hvb.markers['I'], 0, 'G'), 1.0 - 0.1) self.assertEqual(hvb._prob(hvb.markers['I'], 4, 'A'), 1.0 - 0.1) self.assertEqual(hvb._prob(hvb.markers['I'], 3, 'A'), 1.0 - 0.1) # misses self.assertEqual(hvb._prob(hvb.markers['I'], 0, 'A'), 0.1 / 3.0) self.assertEqual(hvb._prob(hvb.markers['I'], 4, 'T'), 0.1 / 3.0) self.assertEqual(hvb._prob(hvb.markers['I'], 3, 'T'), 0.1 / 3.0) def test_probs_mut_wts(self): mut_wt, mut_max = 0.10, 0.50 hvb = preprocess.HapVarBaseMatrix(self.ref, self.phy, mut_wt, mut_max) # hits self.assertEqual(hvb._prob(hvb.markers['I'], 0, 'G'), 1.0 - 0.1) self.assertEqual(hvb._prob(hvb.markers['I'], 4, 'A'), 1.0 - 0.2) self.assertEqual(hvb._prob(hvb.markers['I'], 3, 'A'), 1.0 - 0.2) # misses self.assertEqual(hvb._prob(hvb.markers['I'], 0, 'A'), 0.1 / 3.0) self.assertEqual(hvb._prob(hvb.markers['I'], 4, 'T'), 0.2 / 3.0) self.assertEqual(hvb._prob(hvb.markers['I'], 3, 'T'), 0.2 / 3.0) def test_probs_for_vars(self): mut_wt, mut_max = 0.10, 0.10 hvb = preprocess.HapVarBaseMatrix(self.ref, self.phy, mut_wt, mut_max) obs_I = list(zip(list(range(9)), "GAAAAAAAA")) self.assertAlmostEqual(hvb.prob_for_vars('I', obs_I), math.log(0.9 ** 9)) self.assertAlmostEqual(hvb.prob_for_vars('C', obs_I), math.log((0.9 ** 7) * ((0.1 / 3) ** 2))) self.assertAlmostEqual(hvb.prob_for_vars('D', obs_I), math.log((0.9 ** 5) * ((0.1 / 3) ** 4))) def test_probs_for_vars_mut_wts(self): mut_wt, mut_max = 0.10, 0.50 hvb = preprocess.HapVarBaseMatrix(self.ref, self.phy, mut_wt, mut_max) obs_I = list(zip(list(range(9)), "GAAAAAAAA")) self.assertAlmostEqual(hvb.prob_for_vars('I', obs_I), math.log((0.9 ** 7) * (0.8 ** 2))) self.assertAlmostEqual(hvb.prob_for_vars('C', obs_I), math.log((0.9 ** 5) * (0.8 ** 2) * ((0.1 / 3) ** 2))) class TestProcessReads(unittest.TestCase): def setUp(self): self.mq = 30 self.bq = 30 aln1 = pysam.AlignedSegment() aln1.reference_start = 10 aln1.query_name = 'read1' aln1.mapping_quality = 30 aln1.query_sequence = "AAAAATAAAATAAAAT" aln1.query_qualities = [30] * 16 aln1.cigarstring = '16M' aln2 = pysam.AlignedSegment() aln2.reference_start = 12 aln2.query_name = 'read2' aln2.mapping_quality = 20 aln2.query_sequence = "AAAGAAGAAAAG" qqual = 
[33] * 12 qqual[3] = 20 aln2.query_qualities = qqual aln2.cigarstring = '5M2D7M' aln3 = pysam.AlignedSegment() aln3.mapping_quality = 0 aln3.query_name = 'read3' self.alns = [aln1, aln2, aln3] def test_process_reads_read_obs_simple(self): res = preprocess.process_reads(self.alns, [15, 20, 25], 20, 10) exp = {'read1':{15:'T', 20:'T', 25:'T'}, 'read2':{15:'G', 20:'G', 25:'G'}} self.assertEqual(res, exp) def test_process_reads_read_obs_min_map_quality(self): res = preprocess.process_reads(self.alns, [15, 20, 25], 25, 10) exp = {'read1':{15:'T', 20:'T', 25:'T'}} self.assertEqual(res, exp) def test_process_reads_read_obs_min_base_quality(self): res = preprocess.process_reads(self.alns, [15, 20, 25], 20, 30) exp = {'read1':{15:'T', 20:'T', 25:'T'}, 'read2':{20:'G', 25:'G'}} self.assertEqual(res, exp) def test_process_reads_read_obs_paired_end(self): aln1b = pysam.AlignedSegment() aln1b.reference_start = 30 aln1b.query_name = 'read1' aln1b.mapping_quality = 30 aln1b.query_sequence = "AAAAACAAAACAAAAT" aln1b.query_qualities = [30] * 16 aln1b.cigarstring = '16M' self.alns.append(aln1b) var_pos = [15, 20, 25, 35, 40] res = preprocess.process_reads(self.alns, var_pos, 20, 10) exp = {'read1':{15:'T', 20:'T', 25:'T', 35:'C', 40:'C'}, 'read2':{15:'G', 20:'G', 25:'G'}} self.assertEqual(res, exp) def test_process_reads_read_obs_paired_end_overlap(self): aln1b = pysam.AlignedSegment() aln1b.reference_start = 20 aln1b.query_name = 'read1' aln1b.mapping_quality = 20 aln1b.query_sequence = "AAAAATAAAACAAAAT" aln1b.query_qualities = [30] * 16 aln1b.cigarstring = '16M' self.alns.append(aln1b) var_pos = [15, 20, 25, 35] res = preprocess.process_reads(self.alns, var_pos, 20, 10) exp = {'read1':{15:'T', 25:'T', 35:'T'}, 'read2':{15:'G', 20:'G', 25:'G'}} self.assertEqual(res, exp) def test_process_reads_read_obs_paired_end_overlap_1bad_base_qual(self): aln1b = pysam.AlignedSegment() aln1b.reference_start = 20 aln1b.query_name = 'read1' aln1b.mapping_quality = 20 aln1b.query_sequence = "AAAAATAAAACAAAAC" qqual = [30] * 16 qqual[0] = 5 aln1b.query_qualities = qqual aln1b.cigarstring = '16M' self.alns.append(aln1b) var_pos = [15, 20, 25, 35] res = preprocess.process_reads(self.alns, var_pos, 20, 10) exp = {'read1':{15:'T', 20:'T', 25:'T', 35:'C'}, 'read2':{15:'G', 20:'G', 25:'G'}} self.assertEqual(res, exp) class TestReadSignatures(unittest.TestCase): def test_read_signature(self): obs_tab = {1:'A', 2:'C', 3:'G', 4:'T'} sig = preprocess.read_signature(obs_tab) self.assertEqual(sig, "1:A,2:C,3:G,4:T") obs_from_sig = preprocess.pos_obs_from_sig(sig) self.assertEqual(obs_tab, dict(obs_from_sig)) def test_read_signature_bad_input(self): with self.assertRaises(TypeError): obs_tab = {'A':'A', 2:'C', 3:'G', 4:'T'} preprocess.read_signature(obs_tab) with self.assertRaises(TypeError): obs_tab = "1:A,2:C,3:G,4:T" preprocess.read_signature(obs_tab) def test_pos_obs_from_sig(self): with self.assertRaises(TypeError): sig = "A:A,2:C,3:G,4:T" preprocess.read_signature(sig) with self.assertRaises(TypeError): sig = "1:A,2:C,3:G,4:T,5" preprocess.read_signature(sig) def test_reduce_reads_simple(self): reads = {"read1":{1:'A', 2:'C'}, "read2":{3:'G', 4:'T'}} res = preprocess.reduce_reads(reads) sigs = {"1:A,2:C":["read1"], "3:G,4:T":["read2"]} self.assertEqual(res, sigs) def test_reduce_reads_overlap(self): reads = {"read1":{1:'A', 2:'C'}, "read2":{3:'G', 4:'T'}, "read3":{2:'C', 3:'G'}} res = preprocess.reduce_reads(reads) sigs = {"1:A,2:C":["read1"], "3:G,4:T":["read2"], "2:C,3:G":["read3"]} self.assertEqual(res, sigs) def 
test_reduce_reads_match(self): reads = {"read1":{1:'A', 2:'C'}, "read2":{3:'G', 4:'T'}, "read3":{2:'C', 1:'A'}} res = preprocess.reduce_reads(reads) sigs = {"1:A,2:C":["read1", "read3"], "3:G,4:T":["read2"]} self.assertEqual(res, sigs) def test_reduce_reads_diff_bases(self): reads = {"read1":{1:'A', 2:'C'}, "read2":{3:'G', 4:'T'}, "read3":{2:'C', 1:'T'}} res = preprocess.reduce_reads(reads) sigs = {"1:A,2:C":["read1"], "3:G,4:T":["read2"], "1:T,2:C":["read3"]} self.assertEqual(res, sigs) class TestBuildEMMatrix(unittest.TestCase): def setUp(self): parser = argparse.ArgumentParser() self.args = parser.parse_args([]) self.args.verbose = False phy_in = ['I, A1G ,,', ',H, A3T A5T ,,', ',,F, A6T ,,', ',,,B, A8T ,,', ',,,C, T5A ,,', ',,G, A7T ,,', ',,,D, A9T ,,', ',,,E, A4T ,,', ',A, A2T A4T ,,'] self.phy = phylotree.Phylotree(phy_in) self.ref = "AAAAAAAAA" self.haps = list("ABCDEFGHI") def test_build_em_matrix_simple(self): reads = ["1:A,2:C", "1:T,2:C", "3:T,4:T", "2:A,4:T"] in_mat = preprocess.build_em_matrix(self.ref, self.phy, reads, self.haps, self.args) r1 = ([(0.01/3) * (0.01/3)] + ([0.99 * (0.01/3)] * 8)) r2 = ([0.99 * (0.01/3)] + ([(0.01/3) * (0.01/3)] * 8)) r3 = ([(0.98) * (0.02/3)] + [(0.02/3) * (0.98)] + [(0.02/3) * (0.02/3)] + [(0.02/3) * (0.98)] + [0.98 * 0.98] + ([(0.02/3) * (0.98)] * 3) + [(0.02/3) * (0.02/3)]) r4 = ([0.99 * (0.02/3)] + [(0.01/3) * (0.98)] + [(0.01/3) * (0.02/3)] + ([(0.01/3) * (0.98)] * 5) + [0.99 * (0.02/3)]) res_mat = numpy.log(numpy.array([r1, r2, r3, r4])) self.assertEqual(in_mat.shape, (len(reads), len(self.haps))) self.assertTrue(numpy.allclose(in_mat, res_mat)) def test_build_em_matrix_diff_mut(self): # need to implement this first. pass class TestBuildEMInput(unittest.TestCase): # Need bam file for this too. pass if __name__ == '__main__': unittest.main()
svohr/mixemt
mixemt/test/preprocess_test.py
Python
mit
11,279
[ "pysam" ]
8902bd57b423590833e42e7312a0938bd9e69ae2047684dc43e8f7d6e38b9144
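The expected values in `test_probs_for_vars` above follow from the scoring model the tests exercise: a base matching the haplotype's expected base contributes 1 - mut, and each of the three mismatching bases contributes mut / 3. A worked instance of the 'C' haplotype case (seven matching positions, two mismatches, mut = 0.10):

import math

mut = 0.10
p_match = 1.0 - mut        # 0.9
p_mismatch = mut / 3.0     # one of three possible wrong bases
log_prob = math.log(p_match ** 7) + 2 * math.log(p_mismatch)
print(log_prob)            # equals math.log((0.9 ** 7) * ((0.1 / 3) ** 2))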
#!/bin/python
# -*- coding: utf-8 -*-

import urllib
import urllib2
import cookielib
import re
import sys
# import random


class baidu_Search:

    def __init__(self):
        self.enable = True
        self.page = 0

    def rmTags(self, str):
        pattern1 = re.compile(r'<.*?>', re.DOTALL)
        pattern2 = re.compile(r'&nbsp')
        pattern3 = re.compile(ur';-;')
        pattern4 = re.compile(ur'&gt;\s*')
        str = pattern1.sub('', str)
        str = pattern2.sub('', str)
        str = pattern3.sub(u',', str)
        str = pattern4.sub(u'', str)
        return str

    def getPageCounts(self, htmlunicode):
        # Sample of the target markup:
        # <div class="nums">百度为您找到相关结果约68,900,000个</div>
        pattern = re.compile(r'<div class="nums">.+?</div>(.*?)</div>')
        m = pattern.search(htmlunicode)
        pagesCount = ''
        if m:
            pagesCount = m.group(1)
        else:
            print u'Sorry, no results were found!'
        return pagesCount

    def getNextPageUrl(self, htmlunicode):
        pattern = re.compile(r'<div id="page"\s*>.*?<strong>.*?</strong><a href="(.*?)">')
        m = pattern.search(htmlunicode)
        nextPageUrl = ''
        if m:
            nextPageUrl = 'http://www.baidu.com' + m.group(1)
        else:
            print u'Next page not found'
        return nextPageUrl

    def getTitles_Abstracts(self, htmlunicode):
        patternResults = re.compile(r'<div class="result c-container\s*".*?><h3 class="t"\s*>.*?<div class="c-abstract"\s*>.*?</div>', re.DOTALL)
        # With no groups, findall returns a list of whole-match strings;
        # with groups, it returns a list of tuples.
        m = patternResults.findall(htmlunicode)
        titles_abstracts = []
        if (m):
            # print m
            for result in m:
                patternTA = re.compile(r'<h3 class="t"\s*>(.*?)</h3>.*?<div class="c-abstract">(.*?)</div>', re.DOTALL)
                mTA = patternTA.search(result)
                if (mTA):
                    title = self.rmTags(mTA.group(1))
                    abstract = self.rmTags(mTA.group(2))
                    titles_abstracts.append((title, abstract))
                else:
                    titles_abstracts.append((u'No title', u'No abstract'))
        else:
            print u'No titles or abstracts were matched'
        return titles_abstracts

    def Search(self, kw):
        kw = kw.decode(sys.stdin.encoding).encode('utf-8')
        searchurl = 'http://www.baidu.com/' + 's?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd=' + urllib.quote(kw)
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        req = urllib2.Request(searchurl)
        """user_agents = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.76 Safari/537.36'
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
            'IBM WebExplorer /v0.94',
            'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)',
            'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
            'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) /Chrome/28.0.1468.0 Safari/537.36',
            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)'
            ]
        r = random.randint(0,7)
        req.add_header('User-agent',user_agents[r])
        """
        resp = urllib2.urlopen(req)
        htmlunicode = resp.read().decode('utf-8')
        # print htmlunicode
        # myfile = open('/Users/apple/Desktop/text.txt','w')
        # myfile.write(htmlunicode.encode('utf-8'))
        pagesCount = self.getPageCounts(htmlunicode)
        print pagesCount
        while self.enable:
            print u'Press [Enter] to view page', self.page + 1, u'or type [quit] to exit:'
            myInput = raw_input()
            if (myInput == 'quit'):
                break
            titles_abstracts = self.getTitles_Abstracts(htmlunicode)
            for index in range(len(titles_abstracts)):
                print u'Page', self.page + 1, u'result', index + 1, u'...'
                print u'Title: ', titles_abstracts[index][0]
                print u'Abstract: ', titles_abstracts[index][1]
                print "\r\n"
            nextPageUrl = self.getNextPageUrl(htmlunicode)
            self.page += 1
            # print u'next page url:', nextPageUrl
            if (nextPageUrl == ''):
                break
            resp = urllib2.urlopen(nextPageUrl)
            htmlunicode = resp.read().decode('utf-8')


if __name__ == '__main__':
    print u"""
    --------------------------------------------
    author: hao-app
    date  : 2015-03-11
    howTo : enter "quit" to quit program
    advert: press any key to browse, type quit to exit
    --------------------------------------------
    """
    myBaidu = baidu_Search()
    myBaidu.Search(raw_input(u'enter keyword to search: '))
hao-app/baiduCrawl
baiduSearch.py
Python
gpl-2.0
5,356
[ "Galaxy" ]
ea1766059258900b46b598ca12bf25acf5681cddfacdccec65e4b594d6302ff5
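The script above is Python 2 (`urllib2`, `cookielib`, `print` statements). As a point of comparison only, a minimal Python 3 sketch of the same fetch step using the standard library (the keyword is arbitrary; the regex parsing above would be unchanged):

import http.cookiejar
import urllib.parse
import urllib.request

kw = 'example'  # arbitrary query term
url = ('http://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd='
       + urllib.parse.quote(kw))
opener = urllib.request.build_opener(
    urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()))
html = opener.open(url).read().decode('utf-8')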
#!/usr/bin/python

'''
wrapper for blastn
'''

from rjv.fileio import *
from rjv.blast import _blastfields_all as fields

import sys

usage=\
'''
wrapper for blastn

blastme.py query.fa subject.fa > output.csv
'''

if len(sys.argv) < 3:
    print usage
    exit()

blast = 'blastn'
ops = '-evalue 1e-20'  # -num_threads 4
outfmt = '-outfmt "10 ' + ' '.join(fields) + '"'
query = sys.argv[1]
subject = sys.argv[2]

sys.stdout.write('#' + ','.join(fields) + '\n')
sys.stdout.flush()

run('[blast] -query [query] -subject [subject] [ops] [outfmt]')
robertvi/rjv
blastme.py
Python
gpl-2.0
550
[ "BLAST" ]
b6d572e12f49177c85e44648ab32753f925f61e701e5d2af6d919b32e8aa8aa3
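The `run` helper imported from rjv.fileio performs `[name]` substitution from the calling scope. For reference, an equivalent invocation using only the standard library, assuming NCBI BLAST+ `blastn` is on PATH; the field list here is an illustrative subset, not the full `_blastfields_all` sequence:

import subprocess
import sys

fields = ['qseqid', 'sseqid', 'pident', 'length', 'evalue', 'bitscore']
query, subject = sys.argv[1], sys.argv[2]
cmd = ['blastn', '-query', query, '-subject', subject,
       '-evalue', '1e-20', '-outfmt', '10 ' + ' '.join(fields)]
sys.stdout.write('#' + ','.join(fields) + '\n')
sys.stdout.flush()
subprocess.call(cmd)  # output format 10 is CSV, written to stdout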
#! /usr/bin/env python import os import setuptools meta = {} with open(os.path.join('pycallnumber', '__init__.py')) as fh: variables = [l.split(' = ') for l in fh if l.startswith('__')] for var in variables: meta[var[0].strip('_')] = var[-1].strip('"\'\n') setuptools.setup( name=meta['name'], author=meta['author'], author_email=meta['author_email'], version=meta['version'], url=meta['url'], license=meta['license'], description=meta['description'], long_description=('Visit {} for the latest documentation.'.format( meta['url'])), maintainer=meta['maintainer'], keywords=meta['keywords'], packages=setuptools.find_packages(), install_requires=[ 'future;python_version=="2.7"' ], setup_requires=[ 'pytest-runner' ], tests_require=[ 'pytest' ], classifiers=[ 'Intended Audience :: Education', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ] )
jthomale/pycallnumber
setup.py
Python
bsd-3-clause
1,274
[ "VisIt" ]
2c0f5793c8156927f5e4b7e3c5abd19fa6b9f8d1552fa9be02e880ade2b52d0c
from datetime import datetime from sqlalchemy import Table, Column, String, DateTime from sqlalchemy.orm import class_mapper from gearshift import config from gearshift.database import get_engine, bind_metadata, metadata, session, mapper from gearshift.util import load_class from gearshift.visit.api import BaseVisitManager, Visit import logging log = logging.getLogger("gearshift.identity.savisit") visit_class = None class SqlAlchemyVisitManager(BaseVisitManager): def __init__(self, timeout): global visit_class visit_class_path = config.get("tools.visit.saprovider.model", "gearshift.visit.savisit.TG_Visit") visit_class = load_class(visit_class_path) if visit_class is None: msg = 'No visit class found for %s' % visit_class_path msg += ', did you run setup.py develop?' log.error(msg) bind_metadata() if visit_class is TG_Visit: mapper(visit_class, visits_table) # base-class' __init__ triggers self.create_model, so mappers need to # be initialized before. super(SqlAlchemyVisitManager, self).__init__(timeout) def create_model(self): """Create the Visit table if it doesn't already exist.""" bind_metadata() class_mapper(visit_class).local_table.create(checkfirst=True) def new_visit_with_key(self, visit_key): created = datetime.now() visit = visit_class() visit.visit_key = visit_key visit.created = created visit.expiry = created + self.timeout session.flush() return Visit(visit_key, True) def visit_for_key(self, visit_key): """Return the visit for this key. Returns None if the visit doesn't exist or has expired. """ visit = visit_class.lookup_visit(visit_key) if not visit: return None now = datetime.now(visit.expiry.tzinfo) if visit.expiry < now: return None # Visit hasn't expired, extend it self.update_visit(visit_key, now+self.timeout) return Visit(visit_key, False) def update_queued_visits(self, queue): # TODO this should be made transactional table = class_mapper(visit_class).mapped_table engine = table.bind # Now update each of the visits with the most recent expiry for visit_key, expiry in queue.items(): log.info("updating visit (%s) to expire at %s", visit_key, expiry) # FIXME: Need to support custom column names engine.execute(table.update(table.c.visit_key==visit_key, values={'expiry': expiry})) # The Visit table visits_table = Table('tg_visit', metadata, Column('visit_key', String(40), primary_key=True), Column('created', DateTime, nullable=False, default=datetime.now), Column('expiry', DateTime) ) class TG_Visit(object): @classmethod def lookup_visit(cls, visit_key): return Visit.get(visit_key)
dbrattli/python-gearshift
gearshift/visit/savisit.py
Python
mit
3,003
[ "VisIt" ]
61fc2cd45e6b743ba7e71bca35bc4fd1000f01b4788f6fed259ec0c881543926
#!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright INRIA # Contributors: Nicolas P. Rougier (Nicolas.Rougier@inria.fr) # # DANA is a computing framework for the simulation of distributed, # asynchronous, numerical and adaptive models. # # This software is governed by the CeCILL license under French law and abiding # by the rules of distribution of free software. You can use, modify and/ or # redistribute the software under the terms of the CeCILL license as circulated # by CEA, CNRS and INRIA at the following URL # http://www.cecill.info/index.en.html. # # As a counterpart to the access to the source code and rights to copy, modify # and redistribute granted by the license, users are provided only with a # limited warranty and the software's author, the holder of the economic # rights, and the successive licensors have only limited liability. # # In this respect, the user's attention is drawn to the risks associated with # loading, using, modifying and/or developing or reproducing the software by # the user in light of its specific status of free software, that may mean that # it is complicated to manipulate, and that also therefore means that it is # reserved for developers and experienced professionals having in-depth # computer knowledge. Users are therefore encouraged to load and test the # software's suitability as regards their requirements in conditions enabling # the security of their systems and/or data to be ensured and, more generally, # to use and operate it in the same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL license and that you accept its terms. # ----------------------------------------------------------------------------- ''' Numerical integration of dynamic neural fields ---------------------------------------------- This script implements the numerical integration of dynamic neural fields [1]_ of the form: α ∂U(x,t)/∂t = -U(x,t) + τ*(∫ w(|x-y|).f(U(y,t)).dy + I(x,t) + h ) where U(x,t) is the potential of a neural population at position x and time t W(d) is a neighborhood function from ℝ⁺ → ℝ f(u) is the firing rate of a single neuron from ℝ → ℝ I(x,t) is the input at position x and time t h is the resting potential α is the temporal decay of the synapse τ is a scaling term In the following example, two stimuli are presented to the DNF and the DNF stabilizes itself onto one of the two stimuli because of the presence of noise. If one removes noise, two small bumps of activity will exist within the focus group. :References: _[1] http://www.scholarpedia.org/article/Neural_fields ''' from dana import * n = 40 p = 2*n+1 alpha, tau, h = 1.0, 0.1, 0 input = np.zeros((n,n)) focus = Group((n,n), '''dU/dt = alpha*(-V + tau*(L+I)) +h : float V = np.maximum(U,0) : float I : float L : float''') SparseConnection(input, focus('I'), np.ones((1,1))) SharedConnection(focus('V'), focus('L'), 1.25*gaussian((p,p),0.1) - 0.75*gaussian((p,p),1.0)) input[...] 
= gaussian((n,n),0.25,(0.5,0.5)) \ + gaussian((n,n),0.25,(-0.5,-0.5)) \ + (2*rnd.random((n,n))-1)*.05 run(time=5.0, dt=0.01) fig = plt.figure(figsize=(12,5)) plt.subplot(121) plt.imshow(input, origin='lower', cmap = plt.cm.Purples, interpolation='nearest', extent=[0,n,0,n]) plt.text(1,1, "Input", fontsize=24) plt.yticks(np.arange(focus.shape[0]//10+1)*10) plt.xticks(np.arange(focus.shape[1]//10+1)*10) plt.grid() plt.subplot(122) plt.imshow(focus.V, origin='lower', cmap = plt.cm.Purples, interpolation='nearest', extent=[0,n,0,n]) plt.text(1,1, "Focus", fontsize=24) plt.yticks(np.arange(focus.shape[0]//10+1)*10) plt.xticks(np.arange(focus.shape[1]//10+1)*10) plt.grid() plt.show()
rougier/dana
examples/DNF-2d.py
Python
bsd-3-clause
4,057
[ "Gaussian", "NEURON" ]
6735b9d928e070634cc18441bc2b35bd8f4a2c114d4d2cda9b6ea2460e604b86
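For readability, the plain-Unicode field equation in the docstring above corresponds to the following LaTeX:

\[
\alpha \,\frac{\partial U(x,t)}{\partial t}
  = -U(x,t) + \tau \left( \int w(\lvert x-y\rvert)\, f(U(y,t))\,\mathrm{d}y
  + I(x,t) + h \right)
\]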
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
##########################################################################
#
# Copyright (c) 2005 Imaginary Landscape LLC and Contributors.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##########################################################################
"""
Implementation of cookie signing as done in `mod_auth_tkt
<http://www.openfusion.com.au/labs/mod_auth_tkt/>`_.

mod_auth_tkt is an Apache module that looks for these signed cookies
and sets ``REMOTE_USER``, ``REMOTE_USER_TOKENS`` (a comma-separated
list of groups) and ``REMOTE_USER_DATA`` (arbitrary string data).

This module is an alternative to the ``paste.auth.cookie`` module; its
primary benefit is compatibility with mod_auth_tkt, which in turn makes
it possible to use the same authentication process with non-Python code
run under Apache.
"""

import time as time_mod
import md5
import Cookie
from paste import request


class AuthTicket(object):

    """
    This class represents an authentication token.  You must pass in
    the shared secret, the userid, and the IP address.  Optionally you
    can include tokens (a list of strings, representing role names) and
    'user_data', which is arbitrary data available for your own use in
    later scripts.  Lastly, you can override the cookie name and
    timestamp.

    Once you provide all the arguments, use .cookie_value() to
    generate the appropriate authentication ticket.  .cookie()
    generates a Cookie object, the str() of which is the complete
    cookie header to be sent.

    CGI usage::

        token = auth_tkt.AuthTicket('sharedsecret', 'username',
            os.environ['REMOTE_ADDR'], tokens=['admin'])
        print 'Status: 200 OK'
        print 'Content-type: text/html'
        print token.cookie()
        print
        ... redirect HTML ...

    Webware usage::

        token = auth_tkt.AuthTicket('sharedsecret', 'username',
            self.request().environ()['REMOTE_ADDR'], tokens=['admin'])
        self.response().setCookie('auth_tkt', token.cookie_value())

    Be careful not to do an HTTP redirect after login; use meta
    refresh or Javascript -- some browsers have bugs where cookies
    aren't saved when set on a redirect.
""" def __init__(self, secret, userid, ip, tokens=(), user_data='', time=None, cookie_name='auth_tkt', secure=False): self.secret = secret self.userid = userid self.ip = ip self.tokens = ','.join(tokens) self.user_data = user_data if time is None: self.time = time_mod.time() else: self.time = time self.cookie_name = cookie_name self.secure = secure def digest(self): return calculate_digest( self.ip, self.time, self.secret, self.userid, self.tokens, self.user_data) def cookie_value(self): v = '%s%08x%s!' % (self.digest(), int(self.time), self.userid) if self.tokens: v += self.tokens + '!' v += self.user_data return v def cookie(self): c = Cookie.SimpleCookie() c[self.cookie_name] = self.cookie_value().encode('base64').strip().replace('\n', '') c[self.cookie_name]['path'] = '/' if self.secure: c[self.cookie_name]['secure'] = 'true' return c class BadTicket(Exception): """ Exception raised when a ticket can't be parsed. If we get far enough to determine what the expected digest should have been, expected is set. This should not be shown by default, but can be useful for debugging. """ def __init__(self, msg, expected=None): self.expected = expected Exception.__init__(self, msg) def parse_ticket(secret, ticket, ip): """ Parse the ticket, returning (timestamp, userid, tokens, user_data). If the ticket cannot be parsed, ``BadTicket`` will be raised with an explanation. """ ticket = ticket.strip('"') digest = ticket[:32] try: timestamp = int(ticket[32:40], 16) except ValueError, e: raise BadTicket('Timestamp is not a hex integer: %s' % e) try: userid, data = ticket[40:].split('!', 1) except ValueError: raise BadTicket('userid is not followed by !') if '!' in data: tokens, user_data = data.split('!', 1) else: # @@: Is this the right order? tokens = '' user_data = data expected = calculate_digest(ip, timestamp, secret, userid, tokens, user_data) if expected != digest: raise BadTicket('Digest signature is not correct', expected=(expected, digest)) tokens = tokens.split(',') return (timestamp, userid, tokens, user_data) def calculate_digest(ip, timestamp, secret, userid, tokens, user_data): secret = maybe_encode(secret) userid = maybe_encode(userid) tokens = maybe_encode(tokens) user_data = maybe_encode(user_data) digest0 = md5.new( encode_ip_timestamp(ip, timestamp) + secret + userid + '\0' + tokens + '\0' + user_data).hexdigest() digest = md5.new(digest0 + secret).hexdigest() return digest def encode_ip_timestamp(ip, timestamp): ip_chars = ''.join(map(chr, map(int, ip.split('.')))) t = int(timestamp) ts = ((t & 0xff000000) >> 24, (t & 0xff0000) >> 16, (t & 0xff00) >> 8, t & 0xff) ts_chars = ''.join(map(chr, ts)) return ip_chars + ts_chars def maybe_encode(s, encoding='utf8'): if isinstance(s, unicode): s = s.encode(encoding) return s class AuthTKTMiddleware(object): """ Middleware that checks for signed cookies that match what `mod_auth_tkt <http://www.openfusion.com.au/labs/mod_auth_tkt/>`_ looks for (if you have mod_auth_tkt installed, you don't need this middleware, since Apache will set the environmental variables for you). Arguments: ``secret``: A secret that should be shared by any instances of this application. If this app is served from more than one machine, they should all have the same secret. ``cookie_name``: The name of the cookie to read and write from. Default ``auth_tkt``. ``secure``: If the cookie should be set as 'secure' (only sent over SSL) and if the login must be over SSL. ``include_ip``: If the cookie should include the user's IP address. 
If so, then if they change IPs their cookie will be invalid. ``logout_path``: The path under this middleware that should signify a logout. The page will be shown as usual, but the user will also be logged out when they visit this page. If used with mod_auth_tkt, then these settings (except logout_path) should match the analogous Apache configuration settings. This also adds two functions to the request: ``environ['paste.auth_tkt.set_user'](userid, tokens='', user_data='')`` This sets a cookie that logs the user in. ``tokens`` is a string (comma-separated groups) or a list of strings. ``user_data`` is a string for your own use. ``environ['paste.auth_tkt.logout_user']()`` Logs out the user. """ def __init__(self, app, secret, cookie_name='auth_tkt', secure=False, include_ip=True, logout_path=None): self.app = app self.secret = secret self.cookie_name = cookie_name self.secure = secure self.include_ip = include_ip self.logout_path = logout_path def __call__(self, environ, start_response): cookies = request.get_cookies(environ) if cookies.has_key(self.cookie_name): cookie_value = cookies[self.cookie_name].value else: cookie_value = '' if cookie_value: if self.include_ip: remote_addr = environ['REMOTE_ADDR'] else: # mod_auth_tkt uses this dummy value when IP is not # checked: remote_addr = '0.0.0.0' # @@: This should handle bad signatures better: # Also, timeouts should cause cookie refresh timestamp, userid, tokens, user_data = parse_ticket( self.secret, cookie_value, remote_addr) tokens = ','.join(tokens) environ['REMOTE_USER'] = userid if environ.get('REMOTE_USER_TOKENS'): # We want to add tokens/roles to what's there: tokens = environ['REMOTE_USER_TOKENS'] + ',' + tokens environ['REMOTE_USER_TOKENS'] = tokens environ['REMOTE_USER_DATA'] = user_data environ['AUTH_TYPE'] = 'cookie' set_cookies = [] def set_user(userid, tokens='', user_data=''): set_cookies.extend(self.set_user_cookie( environ, userid, tokens, user_data)) def logout_user(): set_cookies.extend(self.logout_user_cookie(environ)) environ['paste.auth_tkt.set_user'] = set_user environ['paste.auth_tkt.logout_user'] = logout_user if self.logout_path and environ.get('PATH_INFO') == self.logout_path: logout_user() def cookie_setting_start_response(status, headers, exc_info=None): headers.extend(set_cookies) return start_response(status, headers, exc_info) return self.app(environ, cookie_setting_start_response) def set_user_cookie(self, environ, userid, tokens, user_data): if not isinstance(tokens, basestring): tokens = ','.join(tokens) if self.include_ip: remote_addr = environ['REMOTE_ADDR'] else: remote_addr = '0.0.0.0' ticket = AuthTicket( self.secret, userid, remote_addr, tokens=tokens, user_data=user_data, cookie_name=self.cookie_name, secure=self.secure) # @@: Should we set REMOTE_USER etc in the current # environment right now as well? cookies = [ ('Set-Cookie', '%s=%s; Path=/' % ( self.cookie_name, ticket.cookie_value()))] return cookies def logout_user_cookie(self, environ): cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME')) wild_domain = '.' + cur_domain cookies = [ ('Set-Cookie', '%s=""; Path=/' % self.cookie_name), ('Set-Cookie', '%s=""; Path=/; Domain=%s' % (self.cookie_name, cur_domain)), ('Set-Cookie', '%s=""; Path=/; Domain=%s' % (self.cookie_name, wild_domain)), ] return cookies def make_auth_tkt_middleware( app, global_conf, secret=None, cookie_name='auth_tkt', secure=False, include_ip=True, logout_path=None): """ Creates the `AuthTKTMiddleware <class-paste.auth.auth_tkt.AuthTKTMiddleware.html>`_. 
    ``secret`` is required, but can be set globally or locally.
    """
    from paste.deploy.converters import asbool
    secure = asbool(secure)
    include_ip = asbool(include_ip)
    if secret is None:
        secret = global_conf.get('secret')
    if not secret:
        raise ValueError(
            "You must provide a 'secret' (in global or local configuration)")
    return AuthTKTMiddleware(
        app, secret, cookie_name, secure, include_ip, logout_path or None)
santisiri/popego
envs/ALPHA-POPEGO/lib/python2.5/site-packages/Paste-1.4.2-py2.5.egg/paste/auth/auth_tkt.py
Python
bsd-3-clause
12,707
[ "VisIt" ]
1488e5e1cb9031fe7fa8e3af09664a4dda590e4efcfcb604a9e343721e74ea4f
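The ticket format produced by `cookie_value()` and consumed by `parse_ticket()` above is positional: 32 hex digest characters, 8 hex timestamp characters, then `userid!tokens!user_data`. A minimal unpacking sketch (the digest and field values are dummies, not a valid signature):

ticket = ('d41d8cd98f00b204e9800998ecf8427e'  # 32 hex digest chars (dummy)
          + '%08x' % 1234567890               # timestamp as 8 hex chars
          + 'alice!admin,editor!extra')       # userid!tokens!user_data
digest, ts_hex, rest = ticket[:32], ticket[32:40], ticket[40:]
timestamp = int(ts_hex, 16)
userid, data = rest.split('!', 1)
tokens, user_data = data.split('!', 1)
print((userid, tokens.split(','), user_data, timestamp))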
import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.ticker as ticker import numpy as np import pkgutil import re from debug import Debug, Debuggable import stemming.porter2 from nltk.stem import PorterStemmer from sklearn.neighbors import KernelDensity from collections import OrderedDict from functools32 import lru_cache from scipy.spatial import distance class Text (Debuggable): @classmethod def from_file(cls, path, debug, stopwords=None, nostem=None): """ Create a text from a file. Args: path (str): The file path. """ with open(path, 'r') as f: return cls(f.read(), debug, stopwords, nostem) def __init__(self, text, debug, stopwords=None, nostem=None): """ Store the raw text, tokenize. Args: text (str): The raw text string. stopwords (str): A custom stopwords list path. """ self.debug = debug Debuggable.__init__(self, 'TextPlot') self.text = text self.load_stopwords(stopwords) self.load_nostem(nostem) self.tokenize() @staticmethod def show_stem(term): return stemming.porter2.stem(term) def stem(self, term): if not term in self.nostem: return stemming.porter2.stem(term) else: return term def load_nostem(self, path): """ Load a set of words that should not be stemmed. Args: path (str): The stopwords file path. """ if path: with open(path) as f: self.nostem = set(f.read().splitlines()) else: self.nostem = [] def load_stopwords(self, path): """ Load a set of stopwords. Args: path (str): The stopwords file path. """ if path: with open(path) as f: self.stopwords = set(f.read().splitlines()) else: try: self.stopwords = set( pkgutil .get_data('textplot', 'data/stopwords.txt') .decode('utf8') .splitlines() ) except: self.stopwords = [] def tokenize(self): """ Tokenize the text. """ self.tokens = [] self.terms = OrderedDict() # Generate tokens. for token in self.tokenizer(self.text): # Ignore stopwords. if token['unstemmed'] in self.stopwords: self.tokens.append(None) else: # Token: self.tokens.append(token) # Term: if token['unstemmed'] in self.nostem: offsets = self.terms.setdefault(token['unstemmed'], []) else: offsets = self.terms.setdefault(token['stemmed'], []) offsets.append(token['offset']) def tokenizer(self,text): """ Yield tokens. Args: text (str): The original text. Yields: dict: The next token. """ stem = PorterStemmer().stem tokens = re.finditer('[a-z]+', text.lower()) for offset, match in enumerate(tokens): # Get the raw token. unstemmed = match.group(0) yield { # Emit the token. 'stemmed': stem(unstemmed), 'unstemmed': unstemmed, 'offset': offset } @lru_cache(maxsize=None) def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'): """ Estimate the kernel density of the instances of term in the text. Args: term (str): A stemmed term. bandwidth (int): The kernel bandwidth. samples (int): The number of evenly-spaced sample points. kernel (str): The kernel function. Returns: np.array: The density estimate. """ # Get the offsets of the term instances. try: terms = np.array(self.terms[term])[:, np.newaxis] except: return 0 # Fit the density estimator on the terms. kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms) # Score an evely-spaced array of samples. x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis] scores = kde.score_samples(x_axis) # Scale the scores to integrate to 1. return np.exp(scores) * (len(self.tokens) / samples) def plot_terms_raw_count(self, terms, caption, word_count): """ Plot the X-axis offsets of a term. :param term: The unstemmed term to plot. 
""" fig, ax = plt.subplots() # Be sure to only pick integer tick locations. for axis in [ax.xaxis, ax.yaxis]: axis.set_major_locator(ticker.MaxNLocator(integer=True)) g1 = terms for term in g1: if self.stem(term) in self.terms: xs = self.terms[self.stem(term)] y,binEdges=np.histogram(xs, bins=len(self.tokens)/word_count, range=[0, len(self.tokens)]) bincenters = 0.5*(binEdges[1:]+binEdges[:-1]) average = int(float(sum(y))/float(len(y))) self.debug.print_debug(self, u'The term {0} appears on average {1} times every {2} words'.format(term, average, word_count)) plt.plot(bincenters, y, label=term) plt.xlabel('Word Offset') plt.ylabel('Number of Occurrences') plt.title(caption) plt.legend(loc='upper right') fig = plt.gcf() fig.set_size_inches(10, 4) fig.tight_layout() return plt def plot_terms_histogram(self, terms, caption, word_count): """ Plot the X-axis offsets of a term. :param term: The unstemmed term to plot. """ fig, ax = plt.subplots() # Be sure to only pick integer tick locations. for axis in [ax.xaxis, ax.yaxis]: axis.set_major_locator(ticker.MaxNLocator(integer=True)) g1 = terms for term in g1: if self.stem(term) in self.terms: xs = self.terms[self.stem(term)] plt.hist(xs, bins=len(self.tokens)/word_count, alpha=0.9, range=[0, len(self.tokens)], label=term) plt.xlim(0, len(self.tokens)) plt.xlabel('Word Offset') plt.ylabel('Number of Occurrences') plt.title(caption) plt.legend(loc='upper right') fig = plt.gcf() fig.set_size_inches(10, 4) fig.tight_layout() return plt def plot_terms(self, terms, caption, **kwargs): g1 = terms for term in g1: kde = self.kde(self.stem(term), **kwargs) plt.plot(kde, label=term) plt.xlabel('Word Offset') plt.ylabel('Number of Occurrences') plt.title(caption) plt.legend(loc='upper right') fig = plt.gcf() fig.set_size_inches(10, 4) fig.tight_layout() return plt def plot_terms_two_groups(self, terms, term_name, second_terms, second_term_name, caption, **kwargs): """ War vs. peace terms. """ g1 = terms g2 = second_terms for term in g1: kde = self.kde(self.stem(term), **kwargs) plt.plot(kde, color='#e8a945', label=term_name) for term in g2: kde = self.kde(self.stem(term), **kwargs) plt.plot(kde, color='#0067a2', label=second_term_name) plt.xlabel('Word Offset') plt.ylabel('Number of Occurrences') plt.title(caption) w_patch = mpatches.Patch(color='#e8a945', label=term_name) p_patch = mpatches.Patch(color='#0067a2', label=second_term_name) plt.legend(handles=[w_patch, p_patch], loc='upper right') fig = plt.gcf() fig.set_size_inches(10, 4) fig.tight_layout() return plt def score_braycurtis(self, term1, term2, **kwargs): """ Compute a weighting score based on the "City Block" distance between the kernel density estimates of two terms. :param term1: The first term. :param term2: The second term. 
""" t1_kde = self.kde(term1, **kwargs) t2_kde = self.kde(term2, **kwargs) return 1-distance.braycurtis(t1_kde, t2_kde) def plot_kde_overlap(self, terms, color1='#0067a2', color2='#e8a945', overlap_color='#dddddd', **kwargs): term1 = terms[0] term2 = terms[1] t1 = self.stem(term1) t2 = self.stem(term2) bc = self.score_braycurtis(t1, t2, **kwargs) kde1 = self.kde(t1, **kwargs) kde2 = self.kde(t2, **kwargs) plt.plot(kde1, color=color1, label=term1) plt.plot(kde2, color=color2, label=term2) overlap = np.minimum(kde1, kde2) plt.fill(overlap, color=overlap_color) plt.title(term1+', '+term2+' - '+str(round(bc, 4))) plt.xlabel('Word Offset') plt.ylabel('Number of Occurrences') plt.legend(loc='upper right') fig = plt.gcf() fig.set_size_inches(10, 4) fig.tight_layout() return plt def sort_dict(self, d, reverse=True): """ Sort an ordered dictionary by value, descending. :param d: A dictionary. """ sort = sorted(d.iteritems(), key=lambda x: x[1], reverse=reverse) return OrderedDict(sort) def anchored_scores(self, anchor, method='braycurtis', **kwargs): """ Compute the intersections between an anchor term and all other terms. :param anchor: The anchor term. :param method: The scoring function. """ evaluator = getattr(self, 'score_'+method) pairs = OrderedDict() for term in self.terms: pairs[term] = evaluator(anchor, term, **kwargs) return self.sort_dict(pairs)
MartinPaulEve/PlotSummary
text.py
Python
mit
10,140
[ "Gaussian" ]
be3cd24f02b0f094c0a5784586c61190aa43ba6f8634a93be4b75005c9f44fb0
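`Text.kde` above wraps scikit-learn's KernelDensity estimator; a self-contained sketch of that step with made-up term offsets (bandwidth, sample count, and kernel mirror the method's defaults):

import numpy as np
from sklearn.neighbors import KernelDensity

offsets = np.array([120, 480, 495, 2600])[:, np.newaxis]  # hypothetical offsets
kde = KernelDensity(kernel='gaussian', bandwidth=2000).fit(offsets)
xs = np.linspace(0, 5000, 1000)[:, np.newaxis]  # 5000 stands in for len(tokens)
# Scale so the sampled curve integrates to ~1, as in Text.kde.
density = np.exp(kde.score_samples(xs)) * (5000 / 1000.0)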
import os import os.path import unittest import random import math import tempfile import time import numpy import SimpleITK as sitk import sitkUtils from __main__ import vtk, qt, ctk, slicer from slicer.ScriptedLoadableModule import * def generateModel(modelFiducialName, radius, nFiducials, workingDir): modelFiducialNode = None # If the file exists, load it. Otherwise, we generate one. if os.path.isfile(workingDir+'/'+modelFiducialName+'.fcsv'): (r, modelFiducialNode) = slicer.util.loadMarkupsFiducialList(workingDir+'/'+modelFiducialName+'.fcsv', True) else: modelFiducialNode = slicer.mrmlScene.CreateNodeByClass("vtkMRMLMarkupsFiducialNode") slicer.mrmlScene.AddNode(modelFiducialNode) modelFiducialNode.SetName(modelFiducialName) testLogic.configFiducialModel(modelFiducialNode, radius, nFiducials, 20.0) slicer.util.saveNode(modelFiducialNode, workingDir+'/'+modelFiducialName+'.fcsv') return modelFiducialNode # Generate a random transform. If the transform already exists in the working directory, # the function will load it to the scene. def generateRandomTransform(randomMatrix, randomTransformName, workingDir, imageFOV, dummyFiducialNode): randomTransform = None rangeOffset = 10.0 xRange = [-imageFOV[0]/2.0+rangeOffset, imageFOV[0]/2.0-rangeOffset] yRange = [-imageFOV[1]/2.0+rangeOffset, imageFOV[1]/2.0-rangeOffset] zRange = [-imageFOV[2]/2.0+rangeOffset, imageFOV[2]/2.0-rangeOffset] # Dummy fiducial for checking the range nFid = dummyFiducialNode.GetNumberOfFiducials() if os.path.isfile(workingDir+'/'+randomTransformName+'.h5'): (r, randomTransform) = slicer.util.loadTransform(workingDir+'/'+randomTransformName+'.h5', True) randomTransform.GetMatrixTransformToParent(randomMatrix) slicer.mrmlScene.RemoveNode(randomTransform) else: fFound = False while fFound == False: testLogic.generateRandomTransform(xRange, yRange, zRange, randomMatrix) # Check range fOutOfRange = False for m in range(0, nFid): pos = [0.0, 0.0, 0.0] tpos = [0.0, 0.0, 0.0, 1.0] dummyFiducialNode.GetNthFiducialPosition(m, pos) pos.append(1.0) randomMatrix.MultiplyPoint(pos, tpos) if (tpos[0] < xRange[0]) or (tpos[0] > xRange[1]) or (tpos[1] < yRange[0]) or (tpos[1] > yRange[1]) or (tpos[2] < zRange[0]) or (tpos[2] > zRange[1]): fOutOfRange = True if fOutOfRange == False: print "Found random transform." randomTransform = slicer.mrmlScene.CreateNodeByClass("vtkMRMLLinearTransformNode") randomTransform.SetMatrixTransformToParent(randomMatrix) slicer.mrmlScene.AddNode(randomTransform) randomTransform.SetName(randomTransformName) slicer.util.saveNode(randomTransform, workingDir+'/'+randomTransformName+'.h5') slicer.mrmlScene.RemoveNode(randomTransform) fFound = True else: print "Random transform out of range." 
def generateTestFiducial(modelFiducialNode, randomMatrix): testFiducialNode = slicer.mrmlScene.CreateNodeByClass("vtkMRMLMarkupsFiducialNode") slicer.mrmlScene.AddNode(testFiducialNode) testFiducialNode.RemoveAllMarkups() nFid = modelFiducialNode.GetNumberOfFiducials() for m in range(0, nFid): pos = [0.0, 0.0, 0.0] modelFiducialNode.GetNthFiducialPosition(m, pos) lb = modelFiducialNode.GetNthFiducialLabel(m) testFiducialNode.AddFiducialFromArray(pos, lb) testFiducialNode.ApplyTransformMatrix(randomMatrix) return testFiducialNode def generateTestVolume(testFiducialNode, imageFOV, pixelSpacing, thickness, workingDir): testVolumeNode = None if os.path.isfile(workingDir+'/'+testVolumeName+'.h5'): (r, testVolumeNode) = slicer.util.loadVolume(workingDir+'/'+testVolumeName+'.nrrd', {}, True ) else: ### Volume node for template volume (Volume that represents the size/resolution for marker images) templateVolumeNode = slicer.mrmlScene.CreateNodeByClass("vtkMRMLScalarVolumeNode") slicer.mrmlScene.AddNode(templateVolumeNode) templateVolumeNode.SetName("Template Volume") # Create template volume using the ImageMaker module imageMakerParameters = {} imageMakerParameters["OutputVolume"] = templateVolumeNode.GetID() imageMakerParameters["ScalarType"] = "unsigned_short" imageMakerParameters["NumberOfComponents"] = 1 imageMakerParameters["Dimension"] = 3 imageMakerParameters["Size"] = [int(imageFOV[0]/pixelSpacing), int(imageFOV[1]/pixelSpacing), int(imageFOV[2]/thickness)] imageMakerParameters["Origin"] = [-imageFOV[0]/2.0, -imageFOV[1]/2.0, -imageFOV[2]/2.0] imageMakerParameters["Spacing"] = [pixelSpacing, pixelSpacing, thickness] imageMakerParameters["Direction"] = [1.00, 0.00, 0.00, 0.00, 1.00, 0.00, 0.00, 0.00, 1.00] imageMakerParameters["defaultVoxelValue"] = 100 slicer.cli.run(imageMakerCLI, None, imageMakerParameters, True) testVolumeNode = slicer.mrmlScene.CreateNodeByClass("vtkMRMLScalarVolumeNode") slicer.mrmlScene.AddNode(testVolumeNode) testLogic.generateFiducialImage(templateVolumeNode, testVolumeNode, testFiducialNode) testVolumeNode.SetName(testVolumeName) slicer.util.saveNode(testVolumeNode, workingDir+'/'+testVolumeNode.GetName()+'.nrrd') slicer.mrmlScene.RemoveNode(templateVolumeNode) return testVolumeNode def addGaussianNoise(inputImageNode, outputImageNodeName, sd, mean): input = sitk.Cast(sitkUtils.PullFromSlicer(inputImageNode.GetID()), sitk.sitkUInt16) noiseFilter = sitk.AdditiveGaussianNoiseImageFilter() noiseFilter.SetDebug(False) noiseFilter.SetMean(mean) noiseFilter.SetSeed(0) noiseFilter.SetStandardDeviation(sd) output = noiseFilter.Execute(input) sitkUtils.PushToSlicer(output, outputImageNodeName, 0, True) def computeEstimatedTRE(referenceMatrix, resultMatrix, needleLength): tipOffset = [0.0, 0.0, -needleLength, 1.0] referenceTipPos = [0.0, 0.0, 0.0, 1.0] registeredTipPos= [0.0, 0.0, 0.0, 1.0] referenceMatrix.MultiplyPoint(tipOffset, referenceTipPos) resultMatrix.MultiplyPoint(tipOffset, registeredTipPos) npReferenceTipPos = numpy.array(referenceTipPos[0:3]) npRegisteredTipPos = numpy.array(registeredTipPos[0:3]) errorVector = npReferenceTipPos - npRegisteredTipPos return numpy.linalg.norm(errorVector) ### Parameters lt = time.localtime() workingDir = "/Users/junichi/Experiments/FiducialTest/Test-%04d-%02d-%02d-%02d-%02d-%02d" % (lt.tm_year, lt.tm_mon, lt.tm_mday, lt.tm_hour, lt.tm_min, lt.tm_sec) if not os.path.exists(workingDir): os.makedirs(workingDir) logFileName = "log-%04d-%02d-%02d-%02d-%02d-%02d.txt" % (lt.tm_year, lt.tm_mon, lt.tm_mday, lt.tm_hour, lt.tm_min, 
lt.tm_sec) csvFileName = "result-%04d-%02d-%02d-%02d-%02d-%02d.csv" % (lt.tm_year, lt.tm_mon, lt.tm_mday, lt.tm_hour, lt.tm_min, lt.tm_sec) nTrialsPerCondition = 20 # Fiducial and volume parameters radius = 92 imageFOV = [300, 255, 150] pixelSpacing = 0.9375 thicknessStep = 1.0 nThicknessSteps = 4 # Range for random transform xRange = [-50.0, 50.0] yRange = [-36.0, 36.0] zRange = [-20.0, 20.0] ### Setup modules slicer.util.selectModule('FiducialRegistrationTest') testLogic = slicer.modules.FiducialRegistrationTestWidget.logic imageMakerCLI = slicer.modules.imagemaker testLogic.logFilePath = workingDir + '/' + logFileName testLogic.logFile = open(testLogic.logFilePath, 'a') testLogic.printLog("Console Test > trial, thickness \n") csvFilePath = workingDir + '/' + csvFileName csvFile = open(csvFilePath, 'a') csvFile.write('nFiducials, Thickness, Noise, Trial, FRE, FLE, TRE, Nfid, ProcTime, WallTime\n') ### Dummy fiducial to check the range dummyFiducialNode = generateModel('DummyFiducialFrame', radius, 20, workingDir) ### Generate transform randomMatrix = vtk.vtkMatrix4x4() ## Three comparisons: ## 1. Noise vs Number of fiducials (slice thickness = 2.0mm) ## 2. Slice thickness vs number of fiducials (noise = 0.2) ## 3. Noise vs Slice thickness (number of fiducils = 8) for trial in range(0, nTrialsPerCondition): randomTransformName = "TestRandomTransform-%03d" % (trial) generateRandomTransform(randomMatrix, randomTransformName, workingDir, imageFOV, dummyFiducialNode) for nFiducials in range (5, 10): ### Generate or load a fiducial model modelFiducialName = "Model-Fiducial-%d-%d-%03d" % (radius, nFiducials, trial) modelFiducialNode = generateModel(modelFiducialName, radius, nFiducials, workingDir) testFiducialNode = generateTestFiducial(modelFiducialNode, randomMatrix) thickness = 2.0 ## 1. Noise vs Number of fiducials (Silce thickness = 2.0 mm) testVolumeName = "TestImage-%02d-%d-%03d" % (nFiducials, thickness, trial) testVolumeNode = generateTestVolume(testFiducialNode, imageFOV, pixelSpacing, thickness, workingDir) for noise in numpy.arange(0.0, 0.6, 0.1): testLogic.printLog("Console Test > %d, %d, %f , %f\n" % (nFiducials, trial, thickness, noise)) ## Default voxel value is 100 sd = 100.0 * noise noiseVolumeNodeName = "NoiseImage" addGaussianNoise(testVolumeNode, noiseVolumeNodeName, sd, 0.0) noiseVolumeNode = slicer.util.getNode(noiseVolumeNodeName) resultMatrix = vtk.vtkMatrix4x4() (fre, fle, nFidDetected, procTime, wallTime) = testLogic.runRegistration(modelFiducialNode, noiseVolumeNode, testFiducialNode, resultMatrix) tre = computeEstimatedTRE(randomMatrix, resultMatrix, 150) csvFile.write('%d, %f, %f, %d, %f, %f, %f, %d, %f, %f\n' % (nFiducials, thickness, noise, trial, fre, fle, tre, nFidDetected, procTime, wallTime)) noiseOutputVolumeName = "NoiseImage-%02d-%d-%f-%03d" % (nFiducials, thickness, noise, trial) slicer.util.saveNode(noiseVolumeNode, workingDir+'/'+noiseOutputVolumeName+'.nrrd') slicer.mrmlScene.RemoveNode(noiseVolumeNode) slicer.mrmlScene.RemoveNode(testVolumeNode) ## 2. 
Slice thickness vs Number of fiducials (Noise = 0.2) noise = 0.2 for thickness in numpy.arange(1.0, thicknessStep*nThicknessSteps+0.001, thicknessStep): testLogic.printLog("Console Test > %d, %d, %f , %f\n" % (nFiducials, trial, thickness, noise)) testVolumeName = "TestImage-thickness-%02d-%d-%03d" % (nFiducials, thickness, trial) testVolumeNode = generateTestVolume(testFiducialNode, imageFOV, pixelSpacing, thickness, workingDir) ## Default voxel value is 100 sd = 100.0 * noise noiseVolumeNodeName = "NoiseImage" addGaussianNoise(testVolumeNode, noiseVolumeNodeName, sd, 0.0) noiseVolumeNode = slicer.util.getNode(noiseVolumeNodeName) resultMatrix = vtk.vtkMatrix4x4() (fre, fle, nFidDetected, procTime, wallTime) = testLogic.runRegistration(modelFiducialNode, noiseVolumeNode, testFiducialNode, resultMatrix) tre = computeEstimatedTRE(randomMatrix, resultMatrix, 150) csvFile.write('%d, %f, %f, %d, %f, %f, %f, %d, %f, %f\n' % (nFiducials, thickness, noise, trial, fre, fle, tre, nFidDetected, procTime, wallTime)) slicer.mrmlScene.RemoveNode(testVolumeNode) slicer.mrmlScene.RemoveNode(noiseVolumeNode) slicer.mrmlScene.RemoveNode(modelFiducialNode) slicer.mrmlScene.RemoveNode(testFiducialNode) ### 3. Noise vs Slice thickness (number of fiducils = 8) #nFiducials = 8 #modelFiducialName = "Model-Fiducial-%d-%d-%03d" % (radius, nFiducials, trial) #modelFiducialNode = generateModel(modelFiducialName, radius, nFiducials, workingDir) #testFiducialNode = generateTestFiducial(modelFiducialNode, randomMatrix) # #for thickness in numpy.arange(1.0, thicknessStep*nThicknessSteps+0.001, thicknessStep): # # testVolumeName = "TestImage-thickness-%02d-%d-%03d" % (nFiducials, thickness, trial) # testVolumeNode = generateTestVolume(testFiducialNode, imageFOV, pixelSpacing, thickness, workingDir) # # for noise in numpy.arange(0.0, 0.6, 0.1): # testLogic.printLog("Console Test > %d, %d, %f , %f\n" % (nFiducials, trial, thickness, noise)) # # ## Default voxel value is 100 (see FiducialRegistrationTest module) # sd = 200.0 * noise # noiseVolumeNodeName = "NoiseImage" # addGaussianNoise(testVolumeNode, noiseVolumeNodeName, sd, 0.0) # noiseVolumeNode = slicer.util.getNode(noiseVolumeNodeName) # # resultMatrix = vtk.vtkMatrix4x4() # (fre, fle, nFidDetected, procTime, wallTime) = testLogic.runRegistration(modelFiducialNode, noiseVolumeNode, testFiducialNode, resultMatrix) # # tre = computeEstimatedTRE(randomMatrix, resultMatrix, 150) # csvFile.write('%d, %f, %f, %d, %f, %f, %f, %d, %f, %f\n' % (nFiducials, thickness, noise, trial, fre, fle, tre, nFidDetected, procTime, wallTime)) # # slicer.mrmlScene.RemoveNode(noiseVolumeNode) # # slicer.mrmlScene.RemoveNode(testVolumeNode) slicer.mrmlScene.RemoveNode(dummyFiducialNode) if testLogic.logFile: testLogic.logFile.close() if csvFile: csvFile.close()
tokjun/FiducialRegistrationTest
ConsoleTest/FiducialRegistrationConsoleTest.py
Python
bsd-3-clause
13,969
[ "VTK" ]
c983f1179e54c3cf44d8da2c6d94d7179ca09367daa2d5810a81390875810210
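computeEstimatedTRE in the script above pushes a needle-tip offset through both the reference and the registered transform and reports the distance between the two tip positions. The same target-registration-error calculation in plain numpy, without VTK matrices (the 4x4 transforms below are illustrative stand-ins, not data from the experiment):

import numpy as np

def estimated_tre(reference, result, needle_length):
    """Target registration error at a needle tip needle_length below the origin."""
    tip = np.array([0.0, 0.0, -needle_length, 1.0])
    ref_tip = reference @ tip
    reg_tip = result @ tip
    return np.linalg.norm(ref_tip[:3] - reg_tip[:3])

reference = np.eye(4)
result = np.eye(4)
result[:3, 3] = [0.5, -0.2, 0.1]   # assumed small translation-only error
print(estimated_tre(reference, result, 150.0))

# With a pure translation error the needle length drops out; a rotational
# component makes the tip error grow with needle_length, which is why the
# console test evaluates the error at the 150 mm needle tip rather than at
# the fiducial frame itself.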
import discord from discord.ext import commands from __main__ import send_cmd_help from bs4 import BeautifulSoup import random class Nsfw: """Nsfw commands.""" def __init__(self, bot): self.bot = bot self.session = self.bot.http.session @commands.group(pass_context=True) async def nsfw(self, ctx): """Nsfw Commands""" if ctx.invoked_subcommand is None: await send_cmd_help(ctx) @nsfw.command(no_pm=True, pass_context=True) async def yandere(self, ctx): """Random Image From Yandere""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("https://yande.re/post/random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="highres").get("href") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def konachan(self, ctx): """Random Image From Konachan""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("https://konachan.com/post/random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="highres").get("href") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def e621(self, ctx): """Random Image From e621""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("https://e621.net/post/random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="highres").get("href") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def rule34(self, ctx): """Random Image From rule34""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("http://rule34.xxx/index.php?page=post&s=random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say('http:' + image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def danbooru(self, ctx): """Random Image From Danbooru""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("http://danbooru.donmai.us/posts/random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say('http://danbooru.donmai.us' + image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def gelbooru(self, ctx): """Random Image From Gelbooru""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("http://www.gelbooru.com/index.php?page=post&s=random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def tbib(self, ctx): """Random Image From DrunkenPumken""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not 
channel_nsfw: return try: query = ("http://www.tbib.org/index.php?page=post&s=random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say("http:" + image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def xbooru(self, ctx): """Random Image From Xbooru""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("http://xbooru.com/index.php?page=post&s=random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def furrybooru(self, ctx): """Random Image From Furrybooru""" try: query = ("http://furry.booru.org/index.php?page=post&s=random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def drunkenpumken(self, ctx): """Random Image From DrunkenPumken""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("http://drunkenpumken.booru.org/index.php?page=post&s=random") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(no_pm=True, pass_context=True) async def lolibooru(self, ctx): """Random Image From Lolibooru""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return try: query = ("https://lolibooru.moe/post/random/") page = await self.session.get(query) page = await page.text() soup = BeautifulSoup(page, 'html.parser') image = soup.find(id="image").get("src") image = image.replace(' ', '%20') await self.bot.say(image) except Exception as e: await self.bot.say(":x: **Error:** `{}`".format(e)) @nsfw.command(pass_context=True, no_pm=True) async def ysearch(self, ctx, *tags: str): """Search Yandere With A Tag""" channel_nsfw = await self.is_nsfw(ctx.message.channel) if not channel_nsfw: return if tags == (): await self.bot.say(":warning: Tags are missing.") else: try: tags = ("+").join(tags) query = ("https://yande.re/post.json?limit=42&tags=" + tags) page = await self.session.get(query) json = await page.json() if json != []: await self.bot.say(random.choice(json)['jpeg_url']) else: await self.bot.say(":warning: Yande.re has no images for requested tags.") except Exception as e: await self.bot.say(":x: `{}`".format(e)) async def is_nsfw(self, channel: discord.Channel): try: _gid = channel.server.id except AttributeError: return False data = await self.bot.http.request( discord.http.Route( 'GET', '/guilds/{guild_id}/channels', guild_id=_gid)) channeldata = [d for d in data if d['id'] == channel.id][0] return channeldata['nsfw'] def setup(bot): n = Nsfw(bot) bot.add_cog(n)
Eslyium/Eslyium-Cogs
nsfw/nsfw.py
Python
mit
8,861
[ "MOE" ]
bbea97e813d5c1e04022e4fb72a9a321cf3694105f1e7b36b6daf821d2e61e17
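Every command in the cog above repeats one pattern: fetch a booru's random-post page, parse it with BeautifulSoup, and read the href or src of the highres/image element. A condensed synchronous sketch of that pattern, using requests in place of the bot's shared aiohttp session (URLs and element ids vary per site, exactly as in the cog):

import requests
from bs4 import BeautifulSoup

def random_image(url, element_id):
    """Fetch a random-post page and return the image URL it embeds."""
    page = requests.get(url, timeout=10)
    page.raise_for_status()
    soup = BeautifulSoup(page.text, 'html.parser')
    tag = soup.find(id=element_id)
    if tag is None:
        raise ValueError('No element with id=%r at %s' % (element_id, url))
    # Sites differ: some link the full image via href, others via src.
    return tag.get('href') or tag.get('src')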
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from <project_name>.api import xmlutil from <project_name> import test class SelectorTest(test.TestCase): obj_for_test = {'test': {'name': 'test', 'values': [1, 2, 3], 'attrs': {'foo': 1, 'bar': 2, 'baz': 3, }, }, } def test_empty_selector(self): sel = xmlutil.Selector() self.assertEqual(0, len(sel.chain)) self.assertEqual(self.obj_for_test, sel(self.obj_for_test)) def test_dict_selector(self): sel = xmlutil.Selector('test') self.assertEqual(1, len(sel.chain)) self.assertEqual('test', sel.chain[0]) self.assertEqual(self.obj_for_test['test'], sel(self.obj_for_test)) def test_datum_selector(self): sel = xmlutil.Selector('test', 'name') self.assertEqual(2, len(sel.chain)) self.assertEqual('test', sel.chain[0]) self.assertEqual('name', sel.chain[1]) self.assertEqual('test', sel(self.obj_for_test)) def test_list_selector(self): sel = xmlutil.Selector('test', 'values', 0) self.assertEqual(3, len(sel.chain)) self.assertEqual('test', sel.chain[0]) self.assertEqual('values', sel.chain[1]) self.assertEqual(0, sel.chain[2]) self.assertEqual(1, sel(self.obj_for_test)) def test_items_selector(self): sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items) self.assertEqual(3, len(sel.chain)) self.assertEqual(xmlutil.get_items, sel.chain[2]) for key, val in sel(self.obj_for_test): self.assertEqual(self.obj_for_test['test']['attrs'][key], val) def test_missing_key_selector(self): sel = xmlutil.Selector('test2', 'attrs') self.assertIsNone(sel(self.obj_for_test)) self.assertRaises(KeyError, sel, self.obj_for_test, True) def test_constant_selector(self): sel = xmlutil.ConstantSelector('Foobar') self.assertEqual('Foobar', sel.value) self.assertEqual('Foobar', sel(self.obj_for_test)) class TemplateElementTest(test.TestCase): def test_element_initial_attributes(self): # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3), c=4, d=5, e=6) # Verify all the attributes are as expected expected = dict(a=1, b=2, c=4, d=5, e=6) for k, v in expected.items(): self.assertEqual(v, elem.attrib[k].chain[0]) def test_element_get_attributes(self): expected = dict(a=1, b=2, c=3) # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=expected) # Verify that get() retrieves the attributes for k, v in expected.items(): self.assertEqual(v, elem.get(k).chain[0]) def test_element_set_attributes(self): attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar')) # Create a bare template element with no attributes elem = xmlutil.TemplateElement('test') # Set the attribute values for k, v in attrs.items(): elem.set(k, v) # Now verify what got set self.assertEqual(1, len(elem.attrib['a'].chain)) self.assertEqual('a', elem.attrib['a'].chain[0]) self.assertEqual(1, len(elem.attrib['b'].chain)) self.assertEqual('foo', elem.attrib['b'].chain[0]) self.assertEqual(attrs['c'], elem.attrib['c']) def 
test_element_attribute_keys(self): attrs = dict(a=1, b=2, c=3, d=4) expected = set(attrs.keys()) # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=attrs) # Now verify keys self.assertEqual(expected, set(elem.keys())) def test_element_attribute_items(self): expected = dict(a=xmlutil.Selector(1), b=xmlutil.Selector(2), c=xmlutil.Selector(3)) keys = set(expected.keys()) # Create a template element with some attributes elem = xmlutil.TemplateElement('test', attrib=expected) # Now verify items for k, v in elem.items(): self.assertEqual(expected[k], v) keys.remove(k) # Did we visit all keys? self.assertEqual(0, len(keys)) def test_element_selector_none(self): # Create a template element with no selector elem = xmlutil.TemplateElement('test') self.assertEqual(0, len(elem.selector.chain)) def test_element_selector_string(self): # Create a template element with a string selector elem = xmlutil.TemplateElement('test', selector='test') self.assertEqual(1, len(elem.selector.chain)) self.assertEqual('test', elem.selector.chain[0]) def test_element_selector(self): sel = xmlutil.Selector('a', 'b') # Create a template element with an explicit selector elem = xmlutil.TemplateElement('test', selector=sel) self.assertEqual(sel, elem.selector) def test_element_subselector_none(self): # Create a template element with no subselector elem = xmlutil.TemplateElement('test') self.assertIsNone(elem.subselector) def test_element_subselector_string(self): # Create a template element with a string subselector elem = xmlutil.TemplateElement('test', subselector='test') self.assertEqual(1, len(elem.subselector.chain)) self.assertEqual('test', elem.subselector.chain[0]) def test_element_subselector(self): sel = xmlutil.Selector('a', 'b') # Create a template element with an explicit subselector elem = xmlutil.TemplateElement('test', subselector=sel) self.assertEqual(sel, elem.subselector) def test_element_append_child(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty self.assertEqual(0, len(elem)) # Create a child element child = xmlutil.TemplateElement('child') # Append the child to the parent elem.append(child) # Verify that the child was added self.assertEqual(1, len(elem)) self.assertEqual(child, elem[0]) self.assertIn('child', elem) self.assertEqual(child, elem['child']) # Ensure that multiple children of the same name are rejected child2 = xmlutil.TemplateElement('child') self.assertRaises(KeyError, elem.append, child2) def test_element_extend_children(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty self.assertEqual(0, len(elem)) # Create a few children children = [xmlutil.TemplateElement('child1'), xmlutil.TemplateElement('child2'), xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) # Verify that the children were added self.assertEqual(3, len(elem)) for idx in range(len(elem)): self.assertEqual(children[idx], elem[idx]) self.assertIn(children[idx].tag, elem) self.assertEqual(children[idx], elem[children[idx].tag]) # Ensure that multiple children of the same name are rejected children2 = [xmlutil.TemplateElement('child4'), xmlutil.TemplateElement('child1'), ] self.assertRaises(KeyError, elem.extend, children2) # Also ensure that child4 was not added self.assertEqual(3, len(elem)) self.assertEqual('child3', elem[-1].tag) def test_element_insert_child(self): # Create an element elem = xmlutil.TemplateElement('test') 
# Make sure the element starts off empty self.assertEqual(0, len(elem)) # Create a few children children = [xmlutil.TemplateElement('child1'), xmlutil.TemplateElement('child2'), xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) # Create a child to insert child = xmlutil.TemplateElement('child4') # Insert it elem.insert(1, child) # Ensure the child was inserted in the right place self.assertEqual(4, len(elem)) children.insert(1, child) for idx in range(len(elem)): self.assertEqual(children[idx], elem[idx]) self.assertIn(children[idx].tag, elem) self.assertEqual(children[idx], elem[children[idx].tag]) # Ensure that multiple children of the same name are rejected child2 = xmlutil.TemplateElement('child2') self.assertRaises(KeyError, elem.insert, 2, child2) def test_element_remove_child(self): # Create an element elem = xmlutil.TemplateElement('test') # Make sure the element starts off empty self.assertEqual(0, len(elem)) # Create a few children children = [xmlutil.TemplateElement('child1'), xmlutil.TemplateElement('child2'), xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) # Create a test child to remove child = xmlutil.TemplateElement('child2') # Try to remove it self.assertRaises(ValueError, elem.remove, child) # Ensure that no child was removed self.assertEqual(3, len(elem)) # Now remove a legitimate child elem.remove(children[1]) # Ensure that the child was removed self.assertEqual(2, len(elem)) self.assertEqual(children[0], elem[0]) self.assertEqual(children[2], elem[1]) self.assertNotIn('child2', elem) # Ensure the child cannot be retrieved by name def get_key(elem, key): return elem[key] self.assertRaises(KeyError, get_key, elem, 'child2') def test_element_text(self): # Create an element elem = xmlutil.TemplateElement('test') # Ensure that it has no text self.assertIsNone(elem.text) # Try setting it to a string and ensure it becomes a selector elem.text = 'test' self.assertTrue(hasattr(elem.text, 'chain')) self.assertEqual(1, len(elem.text.chain)) self.assertEqual('test', elem.text.chain[0]) # Try resetting the text to None elem.text = None self.assertIsNone(elem.text) # Now make up a selector and try setting the text to that sel = xmlutil.Selector() elem.text = sel self.assertEqual(sel, elem.text) # Finally, try deleting the text and see what happens del elem.text self.assertIsNone(elem.text) def test_apply_attrs(self): # Create a template element attrs = dict(attr1=xmlutil.ConstantSelector(1), attr2=xmlutil.ConstantSelector(2)) tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs) # Create an etree element elem = etree.Element('test') # Apply the template to the element tmpl_elem.apply(elem, None) # Now, verify the correct attributes were set for k, v in elem.items(): self.assertEqual(str(attrs[k].value), v) def test_apply_text(self): # Create a template element tmpl_elem = xmlutil.TemplateElement('test') tmpl_elem.text = xmlutil.ConstantSelector(1) # Create an etree element elem = etree.Element('test') # Apply the template to the element tmpl_elem.apply(elem, None) # Now, verify the text was set self.assertEqual(str(tmpl_elem.text.value), elem.text) def test__render(self): attrs = dict(attr1=xmlutil.ConstantSelector(1), attr2=xmlutil.ConstantSelector(2), attr3=xmlutil.ConstantSelector(3)) # Create a master template element master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) # Create a couple of slave template element slave_elems = [xmlutil.TemplateElement('test', 
attr2=attrs['attr2']), xmlutil.TemplateElement('test', attr3=attrs['attr3']), ] # Try the render elem = master_elem._render(None, None, slave_elems, None) # Verify the particulars of the render self.assertEqual('test', elem.tag) self.assertEqual(0, len(elem.nsmap)) for k, v in elem.items(): self.assertEqual(str(attrs[k].value), v) # Create a parent for the element to be rendered parent = etree.Element('parent') # Try the render again... elem = master_elem._render(parent, None, slave_elems, dict(a='foo')) # Verify the particulars of the render self.assertEqual(1, len(parent)) self.assertEqual(parent[0], elem) self.assertEqual(1, len(elem.nsmap)) self.assertEqual('foo', elem.nsmap['a']) def test_render(self): # Create a template element tmpl_elem = xmlutil.TemplateElement('test') tmpl_elem.text = xmlutil.Selector() # Create the object we're going to render obj = ['elem1', 'elem2', 'elem3', 'elem4'] # Try a render with no object elems = tmpl_elem.render(None, None) self.assertEqual(0, len(elems)) # Try a render with one object elems = tmpl_elem.render(None, 'foo') self.assertEqual(1, len(elems)) self.assertEqual('foo', elems[0][0].text) self.assertEqual('foo', elems[0][1]) # Now, try rendering an object with multiple entries parent = etree.Element('parent') elems = tmpl_elem.render(parent, obj) self.assertEqual(4, len(elems)) # Check the results for idx in range(len(obj)): self.assertEqual(obj[idx], elems[idx][0].text) self.assertEqual(obj[idx], elems[idx][1]) def test_subelement(self): # Try the SubTemplateElement constructor parent = xmlutil.SubTemplateElement(None, 'parent') self.assertEqual('parent', parent.tag) self.assertEqual(0, len(parent)) # Now try it with a parent element child = xmlutil.SubTemplateElement(parent, 'child') self.assertEqual('child', child.tag) self.assertEqual(1, len(parent)) self.assertEqual(parent[0], child) def test_wrap(self): # These are strange methods, but they make things easier elem = xmlutil.TemplateElement('test') self.assertEqual(elem, elem.unwrap()) self.assertEqual(elem, elem.wrap().root) def test_dyntag(self): obj = ['a', 'b', 'c'] # Create a template element with a dynamic tag tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector()) # Try the render parent = etree.Element('parent') elems = tmpl_elem.render(parent, obj) # Verify the particulars of the render self.assertEqual(len(obj), len(elems)) for idx in range(len(obj)): self.assertEqual(obj[idx], elems[idx][0].tag) class TemplateTest(test.TestCase): def test_wrap(self): # These are strange methods, but they make things easier elem = xmlutil.TemplateElement('test') tmpl = xmlutil.Template(elem) self.assertEqual(elem, tmpl.unwrap()) self.assertEqual(tmpl, tmpl.wrap()) def test__siblings(self): # Set up a basic template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.Template(elem) # Check that we get the right siblings siblings = tmpl._siblings() self.assertEqual(1, len(siblings)) self.assertEqual(elem, siblings[0]) def test__splitTagName(self): test_cases = [ ('a', ['a']), ('a:b', ['a', 'b']), ('{http://test.com}a:b', ['{http://test.com}a', 'b']), ('a:b{http://test.com}:c', ['a', 'b{http://test.com}', 'c']), ] for test_case, expected in test_cases: result = xmlutil.TemplateElement._splitTagName(test_case) self.assertEqual(expected, result) def test__nsmap(self): # Set up a basic template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.Template(elem, nsmap=dict(a="foo")) # Check out that we get the right namespace dictionary nsmap = tmpl._nsmap() self.assertNotEqual(id(nsmap), 
id(tmpl.nsmap)) self.assertEqual(1, len(nsmap)) self.assertEqual('foo', nsmap['a']) def test_master_attach(self): # Set up a master template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.MasterTemplate(elem, 1) # Make sure it has a root but no slaves self.assertEqual(elem, tmpl.root) self.assertEqual(0, len(tmpl.slaves)) # Try to attach an invalid slave bad_elem = xmlutil.TemplateElement('test2') self.assertRaises(ValueError, tmpl.attach, bad_elem) self.assertEqual(0, len(tmpl.slaves)) # Try to attach an invalid and a valid slave good_elem = xmlutil.TemplateElement('test') self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem) self.assertEqual(0, len(tmpl.slaves)) # Try to attach an inapplicable template class InapplicableTemplate(xmlutil.Template): def apply(self, master): return False inapp_tmpl = InapplicableTemplate(good_elem) tmpl.attach(inapp_tmpl) self.assertEqual(0, len(tmpl.slaves)) # Now try attaching an applicable template tmpl.attach(good_elem) self.assertEqual(1, len(tmpl.slaves)) self.assertEqual(good_elem, tmpl.slaves[0].root) def test_master_copy(self): # Construct a master template elem = xmlutil.TemplateElement('test') tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo')) # Give it a slave slave = xmlutil.TemplateElement('test') tmpl.attach(slave) # Construct a copy copy = tmpl.copy() # Check to see if we actually managed a copy self.assertNotEqual(tmpl, copy) self.assertEqual(tmpl.root, copy.root) self.assertEqual(tmpl.version, copy.version) self.assertEqual(id(tmpl.nsmap), id(copy.nsmap)) self.assertNotEqual(id(tmpl.slaves), id(copy.slaves)) self.assertEqual(len(tmpl.slaves), len(copy.slaves)) self.assertEqual(tmpl.slaves[0], copy.slaves[0]) def test_slave_apply(self): # Construct a master template elem = xmlutil.TemplateElement('test') master = xmlutil.MasterTemplate(elem, 3) # Construct a slave template with applicable minimum version slave = xmlutil.SlaveTemplate(elem, 2) self.assertTrue(slave.apply(master)) # Construct a slave template with equal minimum version slave = xmlutil.SlaveTemplate(elem, 3) self.assertTrue(slave.apply(master)) # Construct a slave template with inapplicable minimum version slave = xmlutil.SlaveTemplate(elem, 4) self.assertFalse(slave.apply(master)) # Construct a slave template with applicable version range slave = xmlutil.SlaveTemplate(elem, 2, 4) self.assertTrue(slave.apply(master)) # Construct a slave template with low version range slave = xmlutil.SlaveTemplate(elem, 1, 2) self.assertFalse(slave.apply(master)) # Construct a slave template with high version range slave = xmlutil.SlaveTemplate(elem, 4, 5) self.assertFalse(slave.apply(master)) # Construct a slave template with matching version range slave = xmlutil.SlaveTemplate(elem, 3, 3) self.assertTrue(slave.apply(master)) def test__serialize(self): # Our test object to serialize obj = {'test': {'name': 'foobar', 'values': [1, 2, 3, 4], 'attrs': {'a': 1, 'b': 2, 'c': 3, 'd': 4, }, 'image': {'name': 'image_foobar', 'id': 42, }, }, } # Set up our master template root = xmlutil.TemplateElement('test', selector='test', name='name') value = xmlutil.SubTemplateElement(root, 'value', selector='values') value.text = xmlutil.Selector() attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs') xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items, key=0, value=1) master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo')) # Set up our slave template root_slave = xmlutil.TemplateElement('test', selector='test') image = 
xmlutil.SubTemplateElement(root_slave, 'image', selector='image', id='id') image.text = xmlutil.Selector('name') slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar')) # Attach the slave to the master... master.attach(slave) # Try serializing our object siblings = master._siblings() nsmap = master._nsmap() result = master._serialize(None, obj, siblings, nsmap) # Now we get to manually walk the element tree... self.assertEqual('test', result.tag) self.assertEqual(2, len(result.nsmap)) self.assertEqual('foo', result.nsmap['f']) self.assertEqual('bar', result.nsmap['b']) self.assertEqual(result.get('name'), obj['test']['name']) for idx, val in enumerate(obj['test']['values']): self.assertEqual('value', result[idx].tag) self.assertEqual(str(val), result[idx].text) idx += 1 self.assertEqual('attrs', result[idx].tag) for attr in result[idx]: self.assertEqual('attr', attr.tag) self.assertEqual(str(obj['test']['attrs'][attr.get('key')]), attr.get('value')) idx += 1 self.assertEqual('image', result[idx].tag) self.assertEqual(str(obj['test']['image']['id']), result[idx].get('id')) self.assertEqual(obj['test']['image']['name'], result[idx].text) def test_serialize_with_delimiter(self): # Our test object to serialize obj = {'test': {'scope0:key1': 'Value1', 'scope0:scope1:key2': 'Value2', 'scope0:scope1:scope2:key3': 'Value3' }} # Set up our master template root = xmlutil.TemplateElement('test', selector='test') key1 = xmlutil.SubTemplateElement(root, 'scope0:key1', selector='scope0:key1') key1.text = xmlutil.Selector() key2 = xmlutil.SubTemplateElement(root, 'scope0:scope1:key2', selector='scope0:scope1:key2') key2.text = xmlutil.Selector() key3 = xmlutil.SubTemplateElement(root, 'scope0:scope1:scope2:key3', selector='scope0:scope1:scope2:key3') key3.text = xmlutil.Selector() serializer = xmlutil.MasterTemplate(root, 1) xml_list = [] xml_list.append("<?xmlversion='1.0'encoding='UTF-8'?><test>") xml_list.append("<scope0><key1>Value1</key1><scope1>") xml_list.append("<key2>Value2</key2><scope2><key3>Value3</key3>") xml_list.append("</scope2></scope1></scope0></test>") expected_xml = ''.join(xml_list) result = serializer.serialize(obj) result = result.replace('\n', '').replace(' ', '') self.assertEqual(expected_xml, result) class MasterTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') return xmlutil.MasterTemplate(elem, 1) class SlaveTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') return xmlutil.SlaveTemplate(elem, 1) class TemplateBuilderTest(test.TestCase): def test_master_template_builder(self): # Make sure the template hasn't been built yet self.assertIsNone(MasterTemplateBuilder._tmpl) # Now, construct the template tmpl1 = MasterTemplateBuilder() # Make sure that there is a template cached... self.assertIsNotNone(MasterTemplateBuilder._tmpl) # Make sure it wasn't what was returned... 
self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt cached = MasterTemplateBuilder._tmpl tmpl2 = MasterTemplateBuilder() self.assertEqual(MasterTemplateBuilder._tmpl, cached) # Make sure we're always getting fresh copies self.assertNotEqual(tmpl1, tmpl2) # Make sure we can override the copying behavior tmpl3 = MasterTemplateBuilder(False) self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3) def test_slave_template_builder(self): # Make sure the template hasn't been built yet self.assertIsNone(SlaveTemplateBuilder._tmpl) # Now, construct the template tmpl1 = SlaveTemplateBuilder() # Make sure there is a template cached... self.assertIsNotNone(SlaveTemplateBuilder._tmpl) # Make sure it was what was returned... self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt tmpl2 = SlaveTemplateBuilder() self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) # Make sure we're always getting the cached copy self.assertEqual(tmpl1, tmpl2) class MiscellaneousXMLUtilTests(test.TestCase): def test_make_flat_dict(self): expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n" '<wrapper><a>foo</a><b>bar</b></wrapper>') root = xmlutil.make_flat_dict('wrapper') tmpl = xmlutil.MasterTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(expected_xml, result)
hahaps/openstack-project-generator
template/<project_name>/tests/unit/api/test_xmlutil.py
Python
apache-2.0
27,293
[ "VisIt" ]
e7cfd747cf7d773a52ca82bd38bec21c2f2b5cb9435ea9389673dccf12f4c895
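The serializer tests above exercise the intended workflow: build a TemplateElement tree whose selectors pull values out of a plain dict, wrap the root in a MasterTemplate, and call serialize(). Distilled from test__serialize into a minimal sketch; the repo templates its package name as <project_name>, so the import below assumes a concrete project name:

from cinder.api import xmlutil  # assumed instantiation of "<project_name>"

# Template for: <test name="..."><value>...</value>...</test>
root = xmlutil.TemplateElement('test', selector='test', name='name')
value = xmlutil.SubTemplateElement(root, 'value', selector='values')
value.text = xmlutil.Selector()
template = xmlutil.MasterTemplate(root, 1)

obj = {'test': {'name': 'foobar', 'values': [1, 2, 3]}}
print(template.serialize(obj))
# Roughly: <?xml version='1.0' encoding='UTF-8'?>
#          <test name="foobar"><value>1</value><value>2</value><value>3</value></test>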
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2014 Stanford University and the Authors # # Authors: Robert McGibbon # Contributors: Kyle A. Beauchamp, TJ Lane, Joshua Adelman, Lee-Ping Wang # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## ############################################################################## # Imports ############################################################################## from __future__ import print_function, division import os import warnings import functools from copy import deepcopy from collections import Iterable import numpy as np from mdtraj.formats import DCDTrajectoryFile from mdtraj.formats import BINPOSTrajectoryFile from mdtraj.formats import XTCTrajectoryFile from mdtraj.formats import TRRTrajectoryFile from mdtraj.formats import HDF5TrajectoryFile from mdtraj.formats import NetCDFTrajectoryFile from mdtraj.formats import LH5TrajectoryFile from mdtraj.formats import PDBTrajectoryFile from mdtraj.formats import MDCRDTrajectoryFile from mdtraj.formats import ArcTrajectoryFile from mdtraj.formats import DTRTrajectoryFile from mdtraj.formats import LAMMPSTrajectoryFile from mdtraj.formats import XYZTrajectoryFile from mdtraj.formats.prmtop import load_prmtop from mdtraj.formats.psf import load_psf from mdtraj.formats.mol2 import load_mol2 from mdtraj.formats.gro import load_gro from mdtraj.core.topology import Topology from mdtraj.core.residue_names import _SOLVENT_TYPES from mdtraj.utils import (ensure_type, in_units_of, lengths_and_angles_to_box_vectors, box_vectors_to_lengths_and_angles, cast_indices, deprecated) from mdtraj.utils.six.moves import xrange from mdtraj.utils.six import PY3, string_types from mdtraj import _rmsd from mdtraj import _FormatRegistry from mdtraj.geometry import distance ############################################################################## # Globals ############################################################################## __all__ = ['open', 'load', 'iterload', 'load_frame', 'Trajectory'] ############################################################################## # Utilities ############################################################################## def _assert_files_exist(filenames): """Throw an IO error if files don't exist Parameters ---------- filenames : {str, [str]} String or list of strings to check """ if isinstance(filenames, string_types): filenames = [filenames] for fn in filenames: if not (os.path.exists(fn) and os.path.isfile(fn)): raise IOError('No such file: %s' % fn) def _assert_files_or_dirs_exist(names): """Throw an IO error if files don't exist Parameters ---------- filenames : {str, [str]} String or list of strings to check """ if isinstance(names, string_types): names = 
[names] for fn in names: if not (os.path.exists(fn) and \ (os.path.isfile(fn) or os.path.isdir(fn))): raise IOError('No such file: %s' % fn) def _parse_topology(top): """Get the topology from a argument of indeterminate type If top is a string, we try loading a pdb, if its a trajectory we extract its topology. Returns ------- topology : md.Topology """ try: ext = os.path.splitext(top)[1] except: ext = None # might not be a string # supported extensions for constructing topologies extensions = ['.pdb', '.h5','.lh5', '.prmtop', '.parm7', '.psf', '.mol2'] if isinstance(top, string_types) and (ext in ['.pdb', '.h5','.lh5']): _traj = load_frame(top, 0) topology = _traj.topology elif isinstance(top, string_types) and (ext in ['.prmtop', '.parm7']): topology = load_prmtop(top) elif isinstance(top, string_types) and (ext in ['.psf']): topology = load_psf(top) elif isinstance(top, string_types) and (ext in ['.mol2']): topology = load_mol2(top).topology elif isinstance(top, string_types) and (ext in ['.gro']): topology = load_gro(top).topology elif isinstance(top, Trajectory): topology = top.topology elif isinstance(top, Topology): topology = top elif isinstance(top, string_types): raise IOError('The topology is loaded by filename extension, and the ' 'detected "%s" format is not supported. Supported topology ' 'formats include %s and "%s".' % (ext, ', '.join(['"%s"' % e for e in extensions[:-1]]), extensions[-1])) else: raise TypeError('A topology is required. You supplied top=%s' % str(top)) return topology ############################################################################## # Utilities ############################################################################## def open(filename, mode='r', force_overwrite=True, **kwargs): """Open a trajectory file-like object This factor function returns an instance of an open file-like object capable of reading/writing the trajectory (depending on 'mode'). It does not actually load the trajectory from disk or write anything. Parameters ---------- filename : str Path to the trajectory file on disk mode : {'r', 'w'} The mode in which to open the file, either 'r' for read or 'w' for write. force_overwrite : bool If opened in write mode, and a file by the name of `filename` already exists on disk, should we overwrite it? Other Parameters ---------------- kwargs : dict Other keyword parameters are passed directly to the file object Returns ------- fileobject : object Open trajectory file, whose type is determined by the filename extension See Also -------- load, ArcTrajectoryFile, BINPOSTrajectoryFile, DCDTrajectoryFile, HDF5TrajectoryFile, LH5TrajectoryFile, MDCRDTrajectoryFile, NetCDFTrajectoryFile, PDBTrajectoryFile, TRRTrajectoryFile, XTCTrajectoryFile """ extension = os.path.splitext(filename)[1] try: loader = _FormatRegistry.fileobjects[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files with extensions in %s' % (filename, extension, _FormatRegistry.fileobjects.keys())) return loader(filename, mode=mode, force_overwrite=force_overwrite, **kwargs) def load_frame(filename, index, top=None, atom_indices=None): """Load a single frame from a trajectory file Parameters ---------- filename : str Path to the trajectory file on disk index : int Load the `index`-th frame from the specified file top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. 
atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. These indices are zero-based (not 1 based, as used by the PDB format). Examples -------- >>> import mdtraj as md >>> first_frame = md.load_frame('traj.h5', 0) >>> print first_frame <mdtraj.Trajectory with 1 frames, 22 atoms> See Also -------- load, load_frame Returns ------- trajectory : md.Trajectory The resulting conformation, as an md.Trajectory object containing a single frame. """ extension = os.path.splitext(filename)[1] try: loader = _FormatRegistry.loaders[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files with extensions in %s' % (filename, extension, _FormatRegistry.loaders.keys())) kwargs = {'atom_indices': atom_indices} if loader.__name__ not in ['load_hdf5', 'load_pdb']: kwargs['top'] = top if loader.__name__ not in ['load_dtr']: _assert_files_exist(filename) else: _assert_files_or_dirs_exist(filename) return loader(filename, frame=index, **kwargs) def load(filename_or_filenames, discard_overlapping_frames=False, **kwargs): """Load a trajectory from one or more files on disk. This function dispatches to one of the specialized trajectory loaders based on the extension on the filename. Because different trajectory formats save different information on disk, the specific keyword argument options supported depend on the specific loaded. Parameters ---------- filename_or_filenames : {str, list of strings} Filename or list of filenames containing trajectory files of a single format. discard_overlapping_frames : bool, default=False Look for overlapping frames between the last frame of one filename and the first frame of a subsequent filename and discard them Other Parameters ---------------- top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. This option is not required for the .h5, .lh5, and .pdb formats, which already contain topology information. stride : int, default=None Only read every stride-th frame atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. This may be slightly slower than the standard read because it requires an extra copy, but will save memory. See Also -------- load_frame, iterload Examples -------- >>> import mdtraj as md >>> traj = md.load('output.xtc', top='topology.pdb') >>> print traj <mdtraj.Trajectory with 500 frames, 423 atoms at 0x110740a90> >>> traj2 = md.load('output.xtc', stride=2, top='topology.pdb') >>> print traj2 <mdtraj.Trajectory with 250 frames, 423 atoms at 0x11136e410> >>> traj3 = md.load_hdf5('output.xtc', atom_indices=[0,1] top='topology.pdb') >>> print traj3 <mdtraj.Trajectory with 500 frames, 2 atoms at 0x18236e4a0> Returns ------- trajectory : md.Trajectory The resulting trajectory, as an md.Trajectory object. """ if "top" in kwargs: # If applicable, pre-loads the topology from PDB for major performance boost. kwargs["top"] = _parse_topology(kwargs["top"]) # grab the extension of the filename if isinstance(filename_or_filenames, string_types): # If a single filename extension = os.path.splitext(filename_or_filenames)[1] filename = filename_or_filenames else: # If multiple filenames, take the first one. extensions = [os.path.splitext(f)[1] for f in filename_or_filenames] if len(set(extensions)) == 0: raise ValueError('No trajectories specified. 
' 'filename_or_filenames was an empty list') elif len(set(extensions)) > 1: raise TypeError("Each filename must have the same extension. " "Received: %s" % ', '.join(set(extensions))) else: t = [load(f, **kwargs) for f in filename_or_filenames] # we know the topology is equal because we sent the same topology # kwarg in, so there's no reason to spend extra time checking return t[0].join(t[1:], discard_overlapping_frames=discard_overlapping_frames, check_topology=False) try: #loader = _LoaderRegistry[extension][0] loader = _FormatRegistry.loaders[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files ' 'with extensions in %s' % (filename, extension, _FormatRegistry.loaders.keys())) if loader.__name__ in ['load_hdf5', 'load_pdb', 'load_lh5']: if 'top' in kwargs: warnings.warn('top= kwarg ignored since file contains topology information') # this is a little hack that makes calling load() more predicable. since # most of the loaders take a kwargs "top" except for load_hdf5, (since # it saves the topology inside the file), we often end up calling # load_hdf5 via this function with the top kwarg specified. but then # there would be a signature binding error. it's easier just to ignore # it. kwargs.pop('top', None) if loader.__name__ not in ['load_dtr']: _assert_files_exist(filename_or_filenames) else: _assert_files_or_dirs_exist(filename_or_filenames) value = loader(filename, **kwargs) return value def iterload(filename, chunk=100, **kwargs): """An iterator over a trajectory from one or more files on disk, in fragments This may be more memory efficient than loading an entire trajectory at once Parameters ---------- filename : str Path to the trajectory file on disk chunk : int Number of frames to load at once from disk per iteration. If 0, load all. Other Parameters ---------------- top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. This option is not required for the .h5, .lh5, and .pdb formats, which already contain topology information. stride : int, default=None Only read every stride-th frame. atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. This may be slightly slower than the standard read because it requires an extra copy, but will save memory. See Also -------- load, load_frame Examples -------- >>> import mdtraj as md >>> for chunk in md.iterload('output.xtc', top='topology.pdb') ... print chunk <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> """ stride = kwargs.get('stride', 1) atom_indices = cast_indices(kwargs.get('atom_indices', None)) if chunk % stride != 0: raise ValueError('Stride must be a divisor of chunk. stride=%d does not go ' 'evenly into chunk=%d' % (stride, chunk)) if chunk == 0: yield load(filename, **kwargs) else: # If chunk was 0 then we want to avoid filetype-specific code in case of undefined behavior in various file parsers. 
if filename.endswith('.h5'): if 'top' in kwargs: warnings.warn('top= kwarg ignored since file contains topology information') with HDF5TrajectoryFile(filename) as f: if atom_indices is None: topology = f.topology else: topology = f.topology.subset(atom_indices) while True: data = f.read(chunk*stride, stride=stride, atom_indices=atom_indices) if data == []: raise StopIteration() in_units_of(data.coordinates, f.distance_unit, Trajectory._distance_unit, inplace=True) in_units_of(data.cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True) yield Trajectory(xyz=data.coordinates, topology=topology, time=data.time, unitcell_lengths=data.cell_lengths, unitcell_angles=data.cell_angles) if filename.endswith('.lh5'): if 'top' in kwargs: warnings.warn('top= kwarg ignored since file contains topology information') with LH5TrajectoryFile(filename) as f: if atom_indices is None: topology = f.topology else: topology = f.topology.subset(atom_indices) ptr = 0 while True: xyz = f.read(chunk*stride, stride=stride, atom_indices=atom_indices) if len(xyz) == 0: raise StopIteration() in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True) time = np.arange(ptr, ptr+len(xyz)*stride, stride) ptr += len(xyz)*stride yield Trajectory(xyz=xyz, topology=topology, time=time) elif filename.endswith('.xtc'): topology = _parse_topology(kwargs.get('top', None)) with XTCTrajectoryFile(filename) as f: while True: xyz, time, step, box = f.read(chunk*stride, stride=stride, atom_indices=atom_indices) if len(xyz) == 0: raise StopIteration() in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True) in_units_of(box, f.distance_unit, Trajectory._distance_unit, inplace=True) trajectory = Trajectory(xyz=xyz, topology=topology, time=time) trajectory.unitcell_vectors = box yield trajectory elif filename.endswith('.dcd'): topology = _parse_topology(kwargs.get('top', None)) with DCDTrajectoryFile(filename) as f: ptr = 0 while True: # for reasons that I have not investigated, dcdtrajectory file chunk and stride # together work like this method, but HDF5/XTC do not. xyz, box_length, box_angle = f.read(chunk, stride=stride, atom_indices=atom_indices) if len(xyz) == 0: raise StopIteration() in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True) in_units_of(box_length, f.distance_unit, Trajectory._distance_unit, inplace=True) time = np.arange(ptr, ptr+len(xyz)*stride, stride) ptr += len(xyz)*stride yield Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=box_length, unitcell_angles=box_angle) else: t = load(filename, **kwargs) for i in range(0, len(t), chunk): yield t[i:i+chunk] class Trajectory(object): """Container object for a molecular dynamics trajectory A Trajectory represents a collection of one or more molecular structures, generally (but not necessarily) from a molecular dynamics trajectory. The Trajectory stores a number of fields describing the system through time, including the cartesian coordinates of each atoms (``xyz``), the topology of the molecular system (``topology``), and information about the unitcell if appropriate (``unitcell_vectors``, ``unitcell_length``, ``unitcell_angles``). A Trajectory should generally be constructed by loading a file from disk. Trajectories can be loaded from (and saved to) the PDB, XTC, TRR, DCD, binpos, NetCDF or MDTraj HDF5 formats. Trajectory supports fancy indexing, so you can extract one or more frames from a Trajectory as a separate trajectory. 
For example, to form a trajectory with every other frame, you can slice with ``traj[::2]``. Trajectory uses the nanometer, degree & picosecond unit system. Examples -------- >>> # loading a trajectory >>> import mdtraj as md >>> md.load('trajectory.xtc', top='native.pdb') <mdtraj.Trajectory with 1000 frames, 22 atoms at 0x1058a73d0> >>> # slicing a trajectory >>> t = md.load('trajectory.h5') >>> print(t) <mdtraj.Trajectory with 100 frames, 22 atoms> >>> print(t[::2]) <mdtraj.Trajectory with 50 frames, 22 atoms> >>> # calculating the average distance between two atoms >>> import mdtraj as md >>> import numpy as np >>> t = md.load('trajectory.h5') >>> np.mean(np.sqrt(np.sum((t.xyz[:, 0, :] - t.xyz[:, 21, :])**2, axis=1))) See Also -------- mdtraj.load : High-level function that loads files and returns an ``md.Trajectory`` Attributes ---------- n_frames : int n_atoms : int n_residues : int time : np.ndarray, shape=(n_frames,) timestep : float topology : md.Topology top : md.Topology xyz : np.ndarray, shape=(n_frames, n_atoms, 3) unitcell_vectors : {np.ndarray, shape=(n_frames, 3, 3), None} unitcell_lengths : {np.ndarray, shape=(n_frames, 3), None} unitcell_angles : {np.ndarray, shape=(n_frames, 3), None} """ # this is NOT configurable. if it's set to something else, things will break # (thus why I make it private) _distance_unit = 'nanometers' @property def topology(self): """Topology of the system, describing the organization of atoms into residues, bonds, etc Returns ------- topology : md.Topology The topology object, describing the organization of atoms into residues, bonds, etc """ return self._topology @topology.setter def topology(self, value): "Set the topology of the system, describing the organization of atoms into residues, bonds, etc" # todo: more typechecking self._topology = value @property def n_frames(self): """Number of frames in the trajectory Returns ------- n_frames : int The number of frames in the trajectory """ return self._xyz.shape[0] @property def n_atoms(self): """Number of atoms in the trajectory Returns ------- n_atoms : int The number of atoms in the trajectory """ return self._xyz.shape[1] @property def n_residues(self): """Number of residues (amino acids) in the trajectory Returns ------- n_residues : int The number of residues in the trajectory's topology """ if self.top is None: return 0 return sum([1 for r in self.top.residues]) @property def n_chains(self): """Number of chains in the trajectory Returns ------- n_chains : int The number of chains in the trajectory's topology """ if self.top is None: return 0 return sum([1 for c in self.top.chains]) @property def top(self): """Alias for self.topology, describing the organization of atoms into residues, bonds, etc Returns ------- topology : md.Topology The topology object, describing the organization of atoms into residues, bonds, etc """ return self._topology @top.setter def top(self, value): "Set the topology of the system, describing the organization of atoms into residues, bonds, etc" # todo: more typechecking self._topology = value @property def timestep(self): """Timestep between frames, in picoseconds Returns ------- timestep : float The timestep between frames, in picoseconds. 
""" if self.n_frames <= 1: raise(ValueError("Cannot calculate timestep if trajectory has one frame.")) return self._time[1] - self._time[0] @property def time(self): """The simulation time corresponding to each frame, in picoseconds Returns ------- time : np.ndarray, shape=(n_frames,) The simulation time corresponding to each frame, in picoseconds """ return self._time @time.setter def time(self, value): "Set the simulation time corresponding to each frame, in picoseconds" if isinstance(value, list): value = np.array(value) if np.isscalar(value) and self.n_frames == 1: value = np.array([value]) elif not value.shape == (self.n_frames,): raise ValueError('Wrong shape. Got %s, should be %s' % (value.shape, (self.n_frames))) self._time = value @property def unitcell_vectors(self): """The vectors that define the shape of the unit cell in each frame Returns ------- vectors : np.ndarray, shape(n_frames, 3, 3) Vectors defining the shape of the unit cell in each frame. The semantics of this array are that the shape of the unit cell in frame ``i`` are given by the three vectors, ``value[i, 0, :]``, ``value[i, 1, :]``, and ``value[i, 2, :]``. """ if self._unitcell_lengths is None or self._unitcell_angles is None: return None v1, v2, v3 = lengths_and_angles_to_box_vectors( self._unitcell_lengths[:, 0], # a self._unitcell_lengths[:, 1], # b self._unitcell_lengths[:, 2], # c self._unitcell_angles[:, 0], # alpha self._unitcell_angles[:, 1], # beta self._unitcell_angles[:, 2], # gamma ) return np.swapaxes(np.dstack((v1, v2, v3)), 1, 2) @unitcell_vectors.setter def unitcell_vectors(self, vectors): """Set the three vectors that define the shape of the unit cell Parameters ---------- vectors : tuple of three arrays, each of shape=(n_frames, 3) The semantics of this array are that the shape of the unit cell in frame ``i`` are given by the three vectors, ``value[i, 0, :]``, ``value[i, 1, :]``, and ``value[i, 2, :]``. """ if vectors is None: self._unitcell_lengths = None self._unitcell_angles = None return if not len(vectors) == len(self): raise TypeError('unitcell_vectors must be the same length as ' 'the trajectory. you provided %s' % str(vectors)) v1 = vectors[:, 0, :] v2 = vectors[:, 1, :] v3 = vectors[:, 2, :] a, b, c, alpha, beta, gamma = box_vectors_to_lengths_and_angles(v1, v2, v3) self._unitcell_lengths = np.vstack((a, b, c)).T self._unitcell_angles = np.vstack((alpha, beta, gamma)).T @property def unitcell_volumes(self): """Volumes of unit cell for each frame. Returns ------- volumes : {np.ndarray, shape=(n_frames), None} Volumes of the unit cell in each frame, in nanometers^3, or None if the Trajectory contains no unitcell information. """ if self.unitcell_lengths is not None: return np.array(list(map(np.linalg.det, self.unitcell_vectors))) else: return None @property def unitcell_lengths(self): """Lengths that define the shape of the unit cell in each frame. Returns ------- lengths : {np.ndarray, shape=(n_frames, 3), None} Lengths of the unit cell in each frame, in nanometers, or None if the Trajectory contains no unitcell information. """ return self._unitcell_lengths @property def unitcell_angles(self): """Angles that define the shape of the unit cell in each frame. Returns ------- lengths : np.ndarray, shape=(n_frames, 3) The angles between the three unitcell vectors in each frame, ``alpha``, ``beta``, and ``gamma``. ``alpha' gives the angle between vectors ``b`` and ``c``, ``beta`` gives the angle between vectors ``c`` and ``a``, and ``gamma`` gives the angle between vectors ``a`` and ``b``. 
            The angles are in degrees.
        """
        return self._unitcell_angles

    @unitcell_lengths.setter
    def unitcell_lengths(self, value):
        """Set the lengths that define the shape of the unit cell in each frame

        Parameters
        ----------
        value : np.ndarray, shape=(n_frames, 3)
            The distances ``a``, ``b``, and ``c`` that define the shape of the
            unit cell in each frame, or None
        """
        self._unitcell_lengths = ensure_type(value, np.float32, 2,
            'unitcell_lengths', can_be_none=True, shape=(len(self), 3),
            warn_on_cast=False, add_newaxis_on_deficient_ndim=True)

    @unitcell_angles.setter
    def unitcell_angles(self, value):
        """Set the angles that define the shape of the unit cell in each frame

        Parameters
        ----------
        value : np.ndarray, shape=(n_frames, 3)
            The angles ``alpha``, ``beta`` and ``gamma`` that define the
            shape of the unit cell in each frame. The angles should be in
            degrees.
        """
        self._unitcell_angles = ensure_type(value, np.float32, 2,
            'unitcell_angles', can_be_none=True, shape=(len(self), 3),
            warn_on_cast=False, add_newaxis_on_deficient_ndim=True)

    @property
    def xyz(self):
        """Cartesian coordinates of each atom in each simulation frame

        Returns
        -------
        xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
            A three dimensional numpy array, with the cartesian coordinates
            of each atoms in each frame.
        """
        return self._xyz

    @xyz.setter
    def xyz(self, value):
        "Set the cartesian coordinates of each atom in each simulation frame"
        if self.top is not None:
            # if we have a topology and it's not None
            shape = (None, self.topology._numAtoms, 3)
        else:
            shape = (None, None, 3)

        value = ensure_type(value, np.float32, 3, 'xyz', shape=shape,
                            warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
        self._xyz = value
        self._rmsd_traces = None

    def _string_summary_basic(self):
        """Basic summary of traj in string form."""
        unitcell_str = 'and unitcells' if self._have_unitcell else 'without unitcells'
        value = "mdtraj.Trajectory with %d frames, %d atoms, %d residues, %s" % (
            self.n_frames, self.n_atoms, self.n_residues, unitcell_str)
        return value

    def __len__(self):
        return self.n_frames

    def __add__(self, other):
        "Concatenate two trajectories"
        return self.join(other)

    def __str__(self):
        return "<%s>" % (self._string_summary_basic())

    def __repr__(self):
        return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))

    # def describe(self):
    #     """Diagnostic summary statistics on the trajectory"""
    #     # What information do we want to display?
    #     # Goals: easy to figure out if a trajectory is blowing up or contains
    #     # bad data, easy to diagnose other problems. Generally give a
    #     # high-level description of the data in the trajectory.
    #     # Possibly show std. dev. of different coordinates in the trajectory
    #     # or maybe its RMSD drift or something?
    #     # Also, check for any NaNs or Infs in the data. Or other common issues
    #     # like that?
    #     # Note that pandas.DataFrame has a describe() method, which gives
    #     # min/max/mean/std.dev./percentiles of each column in a DataFrame.
    #     raise NotImplementedError()

    def superpose(self, reference, frame=0, atom_indices=None, parallel=True):
        """Superpose each conformation in this trajectory upon a reference

        Parameters
        ----------
        reference : md.Trajectory
            Align self to a particular frame in `reference`
        frame : int
            The index of the conformation in `reference` to align to.
        atom_indices : array_like, or None
            The indices of the atoms to superpose. If not supplied, all atoms
            will be used.
parallel : bool Use OpenMP to run the superposition in parallel over multiple cores Returns ------- self """ if atom_indices is None: atom_indices = slice(None) n_frames = self.xyz.shape[0] self_align_xyz = np.asarray(self.xyz[:, atom_indices, :], order='c') self_displace_xyz = np.asarray(self.xyz, order='c') ref_align_xyz = np.array(reference.xyz[frame, atom_indices, :], copy=True, order='c').reshape(1, -1, 3) offset = np.mean(self_align_xyz, axis=1, dtype=np.float64).reshape(n_frames, 1, 3) self_align_xyz -= offset if self_align_xyz.ctypes.data != self_displace_xyz.ctypes.data: # when atom_indices is None, these two arrays alias the same memory # so we only need to do the centering once self_displace_xyz -= offset ref_offset = ref_align_xyz[0].astype('float64').mean(0) ref_align_xyz[0] -= ref_offset self_g = np.einsum('ijk,ijk->i', self_align_xyz, self_align_xyz) ref_g = np.einsum('ijk,ijk->i', ref_align_xyz , ref_align_xyz) _rmsd.superpose_atom_major( ref_align_xyz, self_align_xyz, ref_g, self_g, self_displace_xyz, 0, parallel=parallel) self_displace_xyz += ref_offset self.xyz = self_displace_xyz return self def join(self, other, check_topology=True, discard_overlapping_frames=False): """Join two trajectories together along the time/frame axis. This method joins trajectories along the time axis, giving a new trajectory of length equal to the sum of the lengths of `self` and `other`. It can also be called by using `self + other` Parameters ---------- other : Trajectory or list of Trajectory One or more trajectories to join with this one. These trajectories are *appended* to the end of this trajectory. check_topology : bool Ensure that the topology of `self` and `other` are identical before joining them. If false, the resulting trajectory will have the topology of `self`. discard_overlapping_frames : bool, optional If True, compare coordinates at trajectory edges to discard overlapping frames. Default: False. See Also -------- stack : join two trajectories along the atom axis """ if isinstance(other, Trajectory): other = [other] if isinstance(other, list): if not all(isinstance(o, Trajectory) for o in other): raise TypeError('You can only join Trajectory instances') if not all(self.n_atoms == o.n_atoms for o in other): raise ValueError('Number of atoms in self (%d) is not equal ' 'to number of atoms in other' % (self.n_atoms)) if check_topology and not all(self.topology == o.topology for o in other): raise ValueError('The topologies of the Trajectories are not the same') if not all(self._have_unitcell == o._have_unitcell for o in other): raise ValueError('Mixing trajectories with and without unitcell') else: raise TypeError('`other` must be a list of Trajectory. 
                            'You supplied %s' % type(other))

        # list containing all of the trajs to merge, including self
        trajectories = [self] + other

        if discard_overlapping_frames:
            for i in range(len(trajectories)-1):
                # last frame of trajectory i
                x0 = trajectories[i].xyz[-1]
                # first frame of trajectory i+1
                x1 = trajectories[i + 1].xyz[0]

                # check that all atoms are within 2e-3 nm
                # (this is kind of arbitrary)
                if np.all(np.abs(x1 - x0) < 2e-3):
                    trajectories[i] = trajectories[i][:-1]

        xyz = np.concatenate([t.xyz for t in trajectories])
        time = np.concatenate([t.time for t in trajectories])
        angles = lengths = None
        if self._have_unitcell:
            angles = np.concatenate([t.unitcell_angles for t in trajectories])
            lengths = np.concatenate([t.unitcell_lengths for t in trajectories])

        # use this syntax so that if you subclass Trajectory,
        # the subclass's join() will return an instance of the subclass
        return self.__class__(xyz, deepcopy(self._topology), time=time,
                              unitcell_lengths=lengths, unitcell_angles=angles)

    def stack(self, other):
        """Stack two trajectories along the atom axis

        This method joins trajectories along the atom axis, giving a new
        trajectory with a number of atoms equal to the sum of the number of
        atoms in `self` and `other`.

        Notes
        -----
        The resulting trajectory will have the unitcell and time information
        of the left operand.

        Examples
        --------
        >>> t1 = md.load('traj1.h5')
        >>> t2 = md.load('traj2.h5')
        >>> # even when t2 contains no unitcell information
        >>> t2.unitcell_vectors = None
        >>> stacked = t1.stack(t2)
        >>> # the stacked trajectory inherits the unitcell information
        >>> # from the first trajectory
        >>> np.all(stacked.unitcell_vectors == t1.unitcell_vectors)
        True

        Parameters
        ----------
        other : Trajectory
            The other trajectory to join

        See Also
        --------
        join : join two trajectories along the time/frame axis.
        """
        if not isinstance(other, Trajectory):
            raise TypeError('You can only stack two Trajectory instances')
        if self.n_frames != other.n_frames:
            raise ValueError('Number of frames in self (%d) is not equal '
                             'to number of frames in other (%d)' % (self.n_frames, other.n_frames))
        if self.topology is not None:
            topology = self.topology.join(other.topology)
        else:
            topology = None

        xyz = np.hstack((self.xyz, other.xyz))
        return self.__class__(xyz=xyz, topology=topology,
                              unitcell_angles=self.unitcell_angles,
                              unitcell_lengths=self.unitcell_lengths,
                              time=self.time)

    def __getitem__(self, key):
        "Get a slice of this trajectory"
        return self.slice(key)

    def slice(self, key, copy=True):
        """Slice trajectory, by extracting one or more frames into a separate object

        This method can also be called using index bracket notation, i.e
        `traj[1] == traj.slice(1)`

        Parameters
        ----------
        key : {int, np.ndarray, slice}
            The slice to take. Can be either an int, a list of ints, or a slice
            object.
        copy : bool, default=True
            Copy the arrays after slicing. If you set this to false, then if
            you modify a slice, you'll modify the original array since they
            point to the same data.
""" xyz = self.xyz[key] time = self.time[key] unitcell_lengths, unitcell_angles = None, None if self.unitcell_angles is not None: unitcell_angles = self.unitcell_angles[key] if self.unitcell_lengths is not None: unitcell_lengths = self.unitcell_lengths[key] if copy: xyz = xyz.copy() time = time.copy() topology = deepcopy(self._topology) if self.unitcell_angles is not None: unitcell_angles = unitcell_angles.copy() if self.unitcell_lengths is not None: unitcell_lengths = unitcell_lengths.copy() newtraj = self.__class__( xyz, topology, time, unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles) if self._rmsd_traces is not None: newtraj._rmsd_traces = np.array(self._rmsd_traces[key], ndmin=1, copy=True) return newtraj def __init__(self, xyz, topology, time=None, unitcell_lengths=None, unitcell_angles=None): # install the topology into the object first, so that when setting # the xyz, we can check that it lines up (e.g. n_atoms), with the topology self.topology = topology self.xyz = xyz # _rmsd_traces are the inner product of each centered conformation, # which are required for computing RMSD. Normally these values are # calculated on the fly in the cython code (rmsd/_rmsd.pyx), but # optionally, we enable the use precomputed values which can speed # up the calculation (useful for clustering), but potentially be unsafe # if self._xyz is modified without a corresponding change to # self._rmsd_traces. This array is populated computed by # center_conformations, and no other methods should really touch it. self._rmsd_traces = None # box has no default, it'll just be none normally self.unitcell_lengths = unitcell_lengths self.unitcell_angles = unitcell_angles # time will take the default 1..N if time is None: time = np.arange(len(self.xyz)) self.time = time if (topology is not None) and (topology._numAtoms != self.n_atoms): raise ValueError("Number of atoms in xyz (%s) and " "in topology (%s) don't match" % (self.n_atoms, topology._numAtoms)) def openmm_positions(self, frame): """OpenMM-compatable positions of a single frame. Examples -------- >>> t = md.load('trajectory.h5') >>> context.setPositions(t.openmm_positions(0)) Parameters ---------- frame : int The index of frame of the trajectory that you wish to extract Returns ------- positions : list The cartesian coordinates of specific trajectory frame, formatted for input to OpenMM """ from simtk.openmm import Vec3 from simtk.unit import nanometer Pos = [] for xyzi in self.xyz[frame]: Pos.append(Vec3(xyzi[0], xyzi[1], xyzi[2])) return Pos * nanometer def openmm_boxes(self, frame): """OpenMM-compatable box vectors of a single frame. Examples -------- >>> t = md.load('trajectory.h5') >>> context.setPeriodicBoxVectors(t.openmm_positions(0)) Parameters ---------- frame : int Return box for this single frame. Returns ------- box : tuple The periodic box vectors for this frame, formatted for input to OpenMM. """ from simtk.openmm import Vec3 from simtk.unit import nanometer vectors = self.unitcell_vectors[frame] if vectors is None: raise ValueError("this trajectory does not contain box size information") v1, v2, v3 = vectors return (Vec3(*v1), Vec3(*v2), Vec3(*v3)) * nanometer @staticmethod # im not really sure if the load function should be just a function or a method on the class # so effectively, lets make it both? 
def load(filenames, **kwargs): """Load a trajectory from disk Parameters ---------- filenames : {str, [str]} Either a string or list of strings Other Parameters ---------------- As requested by the various load functions -- it depends on the extension """ return load(filenames, **kwargs) def save(self, filename, **kwargs): """Save trajectory to disk, in a format determined by the filename extension Parameters ---------- filename : str filesystem path in which to save the trajectory. The extension will be parsed and will control the format. Other Parameters ---------------- lossy : bool For .h5 or .lh5, whether or not to use compression. no_models: bool For .pdb. TODO: Document this? force_overwrite : bool For .binpos, .xtc, .dcd. If `filename` already exists, overwrite it. """ # grab the extension of the filename extension = os.path.splitext(filename)[1] savers = {'.xtc': self.save_xtc, '.trr': self.save_trr, '.pdb': self.save_pdb, '.dcd': self.save_dcd, '.h5': self.save_hdf5, '.binpos': self.save_binpos, '.nc': self.save_netcdf, '.netcdf': self.save_netcdf, '.crd': self.save_mdcrd, '.mdcrd': self.save_mdcrd, '.ncdf': self.save_netcdf, '.lh5': self.save_lh5, '.lammpstrj': self.save_lammpstrj, '.xyz': self.save_xyz, } try: saver = savers[extension] except KeyError: raise IOError('Sorry, no saver for filename=%s (extension=%s) ' 'was found. I can only save files ' 'with extensions in %s' % (filename, extension, savers.keys())) # run the saver, and return whatever output it gives return saver(filename, **kwargs) def save_hdf5(self, filename, force_overwrite=True): """Save trajectory to MDTraj HDF5 format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with HDF5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=self.xyz, time=self.time, cell_angles=self.unitcell_angles, cell_lengths=self.unitcell_lengths) f.topology = self.topology def save_lammpstrj(self, filename, force_overwrite=True): """Save trajectory to LAMMPS custom dump format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with LAMMPSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=self.xyz, cell_angles=self.unitcell_angles, cell_lengths=self.unitcell_lengths) def save_xyz(self, filename, force_overwrite=True): """Save trajectory to .xyz format. Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with XYZTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=self.xyz, types=[a.name for a in self.top.atoms]) def save_pdb(self, filename, force_overwrite=True, bfactors=None): """Save trajectory to RCSB PDB format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there bfactors : array_like, default=None, shape=(n_frames, n_atoms) or (n_atoms,) Save bfactors with pdb file. If the array is two dimensional it should contain a bfactor for each atom in each frame of the trajectory. Otherwise, the same bfactor will be saved in each frame. 
""" self._check_valid_unitcell() if not bfactors is None: if len(np.array(bfactors).shape) == 1: if len(bfactors) != self.n_atoms: raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape)) bfactors = [bfactors] * self.n_frames else: if np.array(bfactors).shape != (self.n_frames, self.n_atoms): raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape)) else: bfactors = [None] * self.n_frames with PDBTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: for i in xrange(self.n_frames): if self._have_unitcell: f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit), self.topology, modelIndex=i, bfactors=bfactors[i], unitcell_lengths=in_units_of(self.unitcell_lengths[i], Trajectory._distance_unit, f.distance_unit), unitcell_angles=self.unitcell_angles[i]) else: f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit), self.topology, modelIndex=i, bfactors=bfactors[i]) def save_xtc(self, filename, force_overwrite=True): """Save trajectory to Gromacs XTC format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with XTCTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=self.xyz, time=self.time, box=self.unitcell_vectors) def save_trr(self, filename, force_overwrite=True): """Save trajectory to Gromacs TRR format Notes ----- Only the xyz coordinates and the time are saved, the velocities and forces in the trr will be zeros Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with TRRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=self.xyz, time=self.time, box=self.unitcell_vectors) def save_dcd(self, filename, force_overwrite=True): """Save trajectory to CHARMM/NAMD DCD format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filenames, if its already there """ self._check_valid_unitcell() with DCDTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_dtr(self, filename, force_overwrite=True): """Save trajectory to DESMOND DTR format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filenames, if its already there """ self._check_valid_unitcell() with DTRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles, times=self.time) def save_binpos(self, filename, force_overwrite=True): """Save trajectory to AMBER BINPOS format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with BINPOSTrajectoryFile(filename, 'w', 
force_overwrite=force_overwrite) as f: f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit)) def save_mdcrd(self, filename, force_overwrite=True): """Save trajectory to AMBER mdcrd format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ self._check_valid_unitcell() if self._have_unitcell: if not np.all(self.unitcell_angles == 90): raise ValueError('Only rectilinear boxes can be saved to mdcrd files') with MDCRDTrajectoryFile(filename, mode='w', force_overwrite=force_overwrite) as f: f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit)) def save_netcdf(self, filename, force_overwrite=True): """Save trajectory in AMBER NetCDF format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ self._check_valid_unitcell() with NetCDFTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=in_units_of(self._xyz, Trajectory._distance_unit, NetCDFTrajectoryFile.distance_unit), time=self.time, cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_lh5(self, filename): """Save trajectory in deprecated MSMBuilder2 LH5 (lossy HDF5) format. Parameters ---------- filename : str filesystem path in which to save the trajectory """ with LH5TrajectoryFile(filename, 'w', force_overwrite=True) as f: f.write(coordinates=self.xyz) f.topology = self.topology def center_coordinates(self, mass_weighted=False): """Center each trajectory frame at the origin (0,0,0). This method acts inplace on the trajectory. The centering can be either uniformly weighted (mass_weighted=False) or weighted by the mass of each atom (mass_weighted=True). Parameters ---------- mass_weighted : bool, optional (default = False) If True, weight atoms by mass when removing COM. Returns ------- self """ if mass_weighted and self.top is not None: self.xyz -= distance.compute_center_of_mass(self)[:, np.newaxis, :] else: self._rmsd_traces = _rmsd._center_inplace_atom_major(self._xyz) return self @deprecated('restrict_atoms was replaced by atom_slice and will be removed in 2.0') def restrict_atoms(self, atom_indices, inplace=True): """Retain only a subset of the atoms in a trajectory Deletes atoms not in `atom_indices`, and re-indexes those that remain Parameters ---------- atom_indices : array-like, dtype=int, shape=(n_atoms) List of atom indices to keep. inplace : bool, default=True If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned with the restricted atoms, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. """ return self.atom_slice(atom_indices, inplace=inplace) def atom_slice(self, atom_indices, inplace=False): """Create a new trajectory from a subset of atoms Parameters ---------- atom_indices : array-like, dtype=int, shape=(n_atoms) List of indices of atoms to retain in the new trajectory. inplace : bool, default=False If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned with the sliced atoms, and ``self`` is not modified. 
        Returns
        -------
        traj : md.Trajectory
            The return value is either ``self``, or the new trajectory,
            depending on the value of ``inplace``.

        See Also
        --------
        stack : stack multiple trajectories along the atom axis
        """
        xyz = np.array(self.xyz[:, atom_indices], order='C')
        topology = None
        if self._topology is not None:
            topology = self._topology.subset(atom_indices)

        if inplace:
            if self._topology is not None:
                self._topology = topology
            self._xyz = xyz

            return self

        unitcell_lengths = unitcell_angles = None
        if self._have_unitcell:
            unitcell_lengths = self._unitcell_lengths.copy()
            unitcell_angles = self._unitcell_angles.copy()
        time = self._time.copy()

        return Trajectory(xyz=xyz, topology=topology, time=time,
                          unitcell_lengths=unitcell_lengths,
                          unitcell_angles=unitcell_angles)

    def remove_solvent(self, exclude=None, inplace=False):
        """Create a new trajectory without solvent atoms

        Parameters
        ----------
        exclude : array-like, dtype=str, shape=(n_solvent_types)
            List of solvent residue names to retain in the new trajectory.
        inplace : bool, default=False
            If ``True``, the operation is done inplace, modifying ``self``.
            Otherwise, a copy is returned with the solvent removed, and
            ``self`` is not modified.

        Returns
        -------
        traj : md.Trajectory
            The return value is either ``self``, or the new trajectory,
            depending on the value of ``inplace``.
        """
        solvent_types = list(_SOLVENT_TYPES)

        if exclude is not None:

            if isinstance(exclude, str):
                raise TypeError('exclude must be array-like')
            if not isinstance(exclude, Iterable):
                raise TypeError('exclude is not iterable')

            for type in exclude:
                if type not in solvent_types:
                    raise ValueError(type + ' is not a valid solvent type')
                solvent_types.remove(type)

        atom_indices = [atom.index for atom in self.topology.atoms if
                        atom.residue.name not in solvent_types]

        return self.atom_slice(atom_indices, inplace=inplace)

    def _check_valid_unitcell(self):
        """Do some sanity checking on self.unitcell_lengths and self.unitcell_angles
        """
        if self.unitcell_lengths is not None and self.unitcell_angles is None:
            raise AttributeError('unitcell length data exists, but no angles')
        if self.unitcell_lengths is None and self.unitcell_angles is not None:
            raise AttributeError('unitcell angles data exists, but no lengths')
        if self.unitcell_lengths is not None and np.any(self.unitcell_lengths < 0):
            raise ValueError('unitcell length < 0')
        if self.unitcell_angles is not None and np.any(self.unitcell_angles < 0):
            raise ValueError('unitcell angle < 0')

    @property
    def _have_unitcell(self):
        return self._unitcell_lengths is not None and self._unitcell_angles is not None
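
# ---------------------------------------------------------------------------
# A minimal, hedged usage sketch of the Trajectory API defined above; it is a
# demonstration, not part of the library. The file names ('input.xtc',
# 'native.pdb', 'protein_only.h5') are hypothetical placeholders, and it
# assumes the mdtraj package is installed and importable.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import mdtraj as md

    # Load a trajectory, supplying a topology for formats (like XTC) that
    # store only coordinates.
    traj = md.load('input.xtc', top='native.pdb')

    # Align every frame onto frame 0 of the same trajectory, then keep
    # every other frame via fancy indexing.
    traj.superpose(traj, frame=0)
    thinned = traj[::2]

    # Drop solvent residues and write the result; the file extension
    # selects the appropriate saver (here, MDTraj HDF5).
    thinned.remove_solvent().save('protein_only.h5')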
kyleabeauchamp/mdtraj
mdtraj/core/trajectory.py
Python
lgpl-2.1
61,694
[ "Amber", "CHARMM", "Desmond", "Gromacs", "LAMMPS", "MDTraj", "NAMD", "NetCDF", "OpenMM" ]
d9aa71d098b3f192aa8ceeabfddb05d6fffeefad0f9d36456cd26a3c46e5e7de
#!/usr/local/bin/python #CHIPSEC: Platform Security Assessment Framework #Copyright (c) 2010-2015, Intel Corporation # #This program is free software; you can redistribute it and/or #modify it under the terms of the GNU General Public License #as published by the Free Software Foundation; Version 2. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # #Contact information: #chipsec@intel.com # """ .. note:: THIS FILE WAS GENERATED Auto generated from: http://www.pcidatabase.com/vendors.php?sort=id http://www.pcidatabase.com/reports.php?type=csv """ VENDORS = { 0x0033 : "Paradyne Corp.", 0x003D : "master", 0x0070 : "Hauppauge Computer Works Inc.", 0x0100 : "USBPDO-8", 0x0123 : "General Dynamics", 0x0315 : "SK - Electronics Co., Ltd.", 0x0402 : " Acer aspire one", 0x046D : "Logitech Inc.", 0x0483 : "UPEK", 0x04A9 : "Canon", 0x04B3 : "IBM", 0x04D9 : "Filco", 0x04F2 : "Chicony Electronics Co. ", 0x051D : "ACPI\VEN_INT&amp;DEV_33A0", 0x0529 : "Aladdin E-Token", 0x0553 : " Aiptek USA", 0x058f : "Alcor Micro Corp.", 0x0590 : "Omron Corp", 0x05ac : "Apple Inc.", 0x05E1 : "D-MAX", 0x064e : "SUYIN Corporation", 0x067B : "Prolific Technology Inc.", 0x06FE : "Acresso Software Inc.", 0x0711 : "SIIG, Inc.", 0x093a : "KYE Systems Corp. / Pixart Imaging", 0x096E : "USB Rockey dongle from Feitain ", 0x0A5C : "Broadcom Corporation", 0x0A89 : "BREA Technologies Inc.", 0x0A92 : "Egosys, Inc.", 0x0AC8 : "ASUS ", 0x0b05 : "Toshiba Bluetooth RFBUS, RFCOM, RFHID", 0x0c45 : "Microdia Ltd.", 0x0cf3 : "TP-Link", 0x0D2E : "Feedback Instruments Ltd.", 0x0D8C : "C-Media Electronics, Inc.", 0x0DF6 : "Sitecom", 0x0E11 : "Compaq Computer Corp.", 0x0E8D : "MediaTek Inc.", 0x1000 : "LSI Logic", 0x1001 : "Kolter Electronic - Germany", 0x1002 : "Advanced Micro Devices, Inc.", 0x1003 : "ULSI", 0x1004 : "VLSI Technology", 0x1006 : "Reply Group", 0x1007 : "Netframe Systems Inc.", 0x1008 : "Epson", 0x100A : "as Ltd. 
de Phoenix del de Tecnolog", 0x100B : "National Semiconductors", 0x100C : "Tseng Labs", 0x100D : "AST Research", 0x100E : "Weitek", 0x1010 : "Video Logic Ltd.", 0x1011 : "Digital Equipment Corporation", 0x1012 : "Micronics Computers Inc.", 0x1013 : "Cirrus Logic", 0x1014 : "International Business Machines Corp.", 0x1016 : "Fujitsu ICL Computers", 0x1017 : "Spea Software AG", 0x1018 : "Unisys Systems", 0x1019 : "Elitegroup Computer System", 0x101A : "NCR Corporation", 0x101B : "Vitesse Semiconductor", 0x101E : "American Megatrends Inc.", 0x101F : "PictureTel Corp.", 0x1020 : "Hitachi Computer Electronics", 0x1021 : "Oki Electric Industry", 0x1022 : "Advanced Micro Devices", 0x1023 : "TRIDENT MICRO", 0x1025 : "Acer Incorporated", 0x1028 : "Dell Inc.", 0x102A : "LSI Logic Headland Division", 0x102B : "Matrox Electronic Systems Ltd.", 0x102C : "Asiliant (Chips And Technologies)", 0x102D : "Wyse Technology", 0x102E : "Olivetti Advanced Technology", 0x102F : "Toshiba America", 0x1030 : "TMC Research", 0x1031 : "miro Computer Products AG", 0x1033 : "NEC Electronics", 0x1034 : "Burndy Corporation", 0x1036 : "Future Domain", 0x1037 : "Hitachi Micro Systems Inc", 0x1038 : "AMP Incorporated", 0x1039 : "Silicon Integrated Systems", 0x103A : "Seiko Epson Corporation", 0x103B : "Tatung Corp. Of America", 0x103C : "Hewlett-Packard", 0x103E : "Solliday Engineering", 0x103F : "Logic Modeling", 0x1041 : "Computrend", 0x1043 : "Asustek Computer Inc.", 0x1044 : "Distributed Processing Tech", 0x1045 : "OPTi Inc.", 0x1046 : "IPC Corporation LTD", 0x1047 : "Genoa Systems Corp.", 0x1048 : "ELSA GmbH", 0x1049 : "Fountain Technology", 0x104A : "STMicroelectronics", 0x104B : "Mylex / Buslogic", 0x104C : "Texas Instruments", 0x104D : "Sony Corporation", 0x104E : "Oak Technology", 0x104F : "Co-Time Computer Ltd.", 0x1050 : "Winbond Electronics Corp.", 0x1051 : "Anigma Corp.", 0x1053 : "Young Micro Systems", 0x1054 : "Hitachi Ltd", 0x1055 : "Standard Microsystems Corp.", 0x1056 : "ICL", 0x1057 : "Motorola", 0x1058 : "Electronics &amp; Telecommunication Res", 0x1059 : "Kontron Canada", 0x105A : "Promise Technology", 0x105B : "Mobham chip", 0x105C : "Wipro Infotech Limited", 0x105D : "Number Nine Visual Technology", 0x105E : "Vtech Engineering Canada Ltd.", 0x105F : "Infotronic America Inc.", 0x1060 : "United Microelectronics", 0x1061 : "8x8 Inc.", 0x1062 : "Maspar Computer Corp.", 0x1063 : "Ocean Office Automation", 0x1064 : "Alcatel Cit", 0x1065 : "Texas Microsystems", 0x1066 : "Picopower Technology", 0x1067 : "Mitsubishi Electronics", 0x1068 : "Diversified Technology", 0x106A : "Aten Research Inc.", 0x106B : "Apple Inc.", 0x106C : "Hyundai Electronics America", 0x106D : "Sequent Computer Systems", 0x106E : "DFI Inc.", 0x106F : "City Gate Development LTD", 0x1070 : "Daewoo Telecom Ltd.", 0x1071 : "Mitac", 0x1072 : "GIT Co. Ltd.", 0x1073 : "Yamaha Corporation", 0x1074 : "Nexgen Microsystems", 0x1075 : "Advanced Integration Research", 0x1077 : "QLogic Corporation", 0x1078 : "Cyrix Corporation", 0x1079 : "I-Bus", 0x107A : "Networth controls", 0x107B : "Gateway 2000", 0x107C : "Goldstar Co. Ltd.", 0x107D : "Leadtek Research", 0x107E : "Testernec", 0x107F : "Data Technology Corporation", 0x1080 : "Cypress Semiconductor", 0x1081 : "Radius Inc.", 0x1082 : "EFA Corporation Of America", 0x1083 : "Forex Computer Corporation", 0x1084 : "Parador", 0x1085 : "Tulip Computers Int'l BV", 0x1086 : "J. 
Bond Computer Systems", 0x1087 : "Cache Computer", 0x1088 : "Microcomputer Systems (M) Son", 0x1089 : "Data General Corporation", 0x108A : "SBS Operations", 0x108C : "Oakleigh Systems Inc.", 0x108D : "Olicom", 0x108E : "Sun Microsystems", 0x108F : "Systemsoft Corporation", 0x1090 : "Encore Computer Corporation", 0x1091 : "Intergraph Corporation", 0x1092 : "Diamond Computer Systems", 0x1093 : "National Instruments", 0x1094 : "Apostolos", 0x1095 : "Silicon Image, Inc.", 0x1096 : "Alacron", 0x1097 : "Appian Graphics", 0x1098 : "Quantum Designs Ltd.", 0x1099 : "Samsung Electronics Co. Ltd.", 0x109A : "Packard Bell", 0x109B : "Gemlight Computer Ltd.", 0x109C : "Megachips Corporation", 0x109D : "Zida Technologies Ltd.", 0x109E : "Brooktree Corporation", 0x109F : "Trigem Computer Inc.", 0x10A0 : "Meidensha Corporation", 0x10A1 : "Juko Electronics Inc. Ltd.", 0x10A2 : "Quantum Corporation", 0x10A3 : "Everex Systems Inc.", 0x10A4 : "Globe Manufacturing Sales", 0x10A5 : "Racal Interlan", 0x10A8 : "Sierra Semiconductor", 0x10A9 : "Silicon Graphics", 0x10AB : "Digicom", 0x10AC : "Honeywell IASD", 0x10AD : "Winbond Systems Labs", 0x10AE : "Cornerstone Technology", 0x10AF : "Micro Computer Systems Inc.", 0x10B0 : "Gainward GmbH ", 0x10B1 : "Cabletron Systems Inc.", 0x10B2 : "Raytheon Company", 0x10B3 : "Databook Inc.", 0x10B4 : "STB Systems", 0x10B5 : "PLX Technology Inc.", 0x10B6 : "Madge Networks", 0x10B7 : "3Com Corporation", 0x10B8 : "Standard Microsystems Corporation", 0x10B9 : "Ali Corporation", 0x10BA : "Mitsubishi Electronics Corp.", 0x10BB : "Dapha Electronics Corporation", 0x10BC : "Advanced Logic Research Inc.", 0x10BD : "Surecom Technology", 0x10BE : "Tsenglabs International Corp.", 0x10BF : "MOST Corp.", 0x10C0 : "Boca Research Inc.", 0x10C1 : "ICM Corp. Ltd.", 0x10C2 : "Auspex Systems Inc.", 0x10C3 : "Samsung Semiconductors", 0x10C4 : "Award Software Int'l Inc.", 0x10C5 : "Xerox Corporation", 0x10C6 : "Rambus Inc.", 0x10C8 : "Neomagic Corporation", 0x10C9 : "Dataexpert Corporation", 0x10CA : "Fujitsu Siemens", 0x10CB : "Omron Corporation", 0x10CD : "Advanced System Products", 0x10CF : "Fujitsu Ltd.", 0x10D1 : "Future+ Systems", 0x10D2 : "Molex Incorporated", 0x10D3 : "Jabil Circuit Inc.", 0x10D4 : "Hualon Microelectronics", 0x10D5 : "Autologic Inc.", 0x10D6 : "Wilson .co .ltd", 0x10D7 : "BCM Advanced Research", 0x10D8 : "Advanced Peripherals Labs", 0x10D9 : "Macronix International Co. Ltd.", 0x10DB : "Rohm Research", 0x10DC : "CERN-European Lab. for Particle Physics", 0x10DD : "Evans &amp; Sutherland", 0x10DE : "NVIDIA", 0x10DF : "Emulex Corporation", 0x10E1 : "Tekram Technology Corp. Ltd.", 0x10E2 : "Aptix Corporation", 0x10E3 : "Tundra Semiconductor Corp.", 0x10E4 : "Tandem Computers", 0x10E5 : "Micro Industries Corporation", 0x10E6 : "Gainbery Computer Products Inc.", 0x10E7 : "Vadem", 0x10E8 : "Applied Micro Circuits Corp.", 0x10E9 : "Alps Electronic Corp. Ltd.", 0x10EA : "Tvia, Inc.", 0x10EB : "Artist Graphics", 0x10EC : "Realtek Semiconductor Corp.", 0x10ED : "Ascii Corporation", 0x10EE : "Xilinx Corporation", 0x10EF : "Racore Computer Products", 0x10F0 : "Curtiss-Wright Controls Embedded Computing", 0x10F1 : "Tyan Computer", 0x10F2 : "Achme Computer Inc. 
- GONE !!!!", 0x10F3 : "Alaris Inc.", 0x10F4 : "S-Mos Systems", 0x10F5 : "NKK Corporation", 0x10F6 : "Creative Electronic Systems SA", 0x10F7 : "Matsushita Electric Industrial Corp.", 0x10F8 : "Altos India Ltd.", 0x10F9 : "PC Direct", 0x10FA : "Truevision", 0x10FB : "Thesys Microelectronic's", 0x10FC : "I-O Data Device Inc.", 0x10FD : "Soyo Technology Corp. Ltd.", 0x10FE : "Fast Electronic GmbH", 0x10FF : "Ncube", 0x1100 : "Jazz Multimedia", 0x1101 : "Initio Corporation", 0x1102 : "Creative Technology LTD.", 0x1103 : " HighPoint Technologies, Inc.", 0x1104 : "Rasterops", 0x1105 : "Sigma Designs Inc.", 0x1106 : "VIA Technologies, Inc.", 0x1107 : "Stratus Computer", 0x1108 : "Proteon Inc.", 0x1109 : "Adaptec/Cogent Data Technologies", 0x110A : "Siemens AG", 0x110B : "Chromatic Research Inc", 0x110C : "Mini-Max Technology Inc.", 0x110D : "ZNYX Corporation", 0x110E : "CPU Technology", 0x110F : "Ross Technology", 0x1112 : "Osicom Technologies Inc.", 0x1113 : "Accton Technology Corporation", 0x1114 : "Atmel Corp.", 0x1116 : "Data Translation, Inc.", 0x1117 : "Datacube Inc.", 0x1118 : "Berg Electronics", 0x1119 : "ICP vortex Computersysteme GmbH", 0x111A : "Efficent Networks", 0x111C : "Tricord Systems Inc.", 0x111D : "Integrated Device Technology Inc.", 0x111F : "Precision Digital Images", 0x1120 : "EMC Corp.", 0x1121 : "Zilog", 0x1123 : "Excellent Design Inc.", 0x1124 : "Leutron Vision AG", 0x1125 : "Eurocore/Vigra", 0x1127 : "FORE Systems", 0x1129 : "Firmworks", 0x112A : "Hermes Electronics Co. Ltd.", 0x112C : "Zenith Data Systems", 0x112D : "Ravicad", 0x112E : "Infomedia", 0x1130 : "Computervision", 0x1131 : "NXP Semiconductors N.V.", 0x1132 : "Mitel Corp.", 0x1133 : "Eicon Networks Corporation", 0x1134 : "Mercury Computer Systems Inc.", 0x1135 : "Fuji Xerox Co Ltd", 0x1136 : "Momentum Data Systems", 0x1137 : "Cisco Systems Inc", 0x1138 : "Ziatech Corporation", 0x1139 : "Dynamic Pictures Inc", 0x113A : "FWB Inc", 0x113B : "Network Computing Devices", 0x113C : "Cyclone Microsystems Inc.", 0x113D : "Leading Edge Products Inc", 0x113E : "Sanyo Electric Co", 0x113F : "Equinox Systems", 0x1140 : "Intervoice Inc", 0x1141 : "Crest Microsystem Inc", 0x1142 : "Alliance Semiconductor", 0x1143 : "Netpower Inc", 0x1144 : "Cincinnati Milacron", 0x1145 : "Workbit Corp", 0x1146 : "Force Computers", 0x1147 : "Interface Corp", 0x1148 : "Marvell Semiconductor Germany GmbH", 0x1149 : "Win System Corporation", 0x114A : "VMIC", 0x114B : "Canopus corporation", 0x114C : "Annabooks", 0x114D : "IC Corporation", 0x114E : "Nikon Systems Inc", 0x114F : "Digi International", 0x1150 : "Thinking Machines Corporation", 0x1151 : "JAE Electronics Inc.", 0x1153 : "Land Win Electronic Corp", 0x1154 : "Melco Inc", 0x1155 : "Pine Technology Ltd", 0x1156 : "Periscope Engineering", 0x1157 : "Avsys Corporation", 0x1158 : "Voarx R&amp;D Inc", 0x1159 : "Mutech", 0x115A : "Harlequin Ltd", 0x115B : "Parallax Graphics", 0x115C : "Photron Ltd.", 0x115D : "Xircom", 0x115E : "Peer Protocols Inc", 0x115F : "Maxtor Corporation", 0x1160 : "Megasoft Inc", 0x1161 : "PFU Ltd", 0x1162 : "OA Laboratory Co Ltd", 0x1163 : "mohamed alsherif", 0x1164 : "Advanced Peripherals Tech", 0x1165 : "Imagraph Corporation", 0x1166 : "Broadcom / ServerWorks", 0x1167 : "Mutoh Industries Inc", 0x1168 : "Thine Electronics Inc", 0x1169 : "Centre f/Dev. of Adv. 
Computing", 0x116A : "Luminex Software, Inc", 0x116B : "Connectware Inc", 0x116C : "Intelligent Resources", 0x116E : "Electronics for Imaging", 0x1170 : "Inventec Corporation", 0x1172 : "Altera Corporation", 0x1173 : "Adobe Systems", 0x1174 : "Bridgeport Machines", 0x1175 : "Mitron Computer Inc.", 0x1176 : "SBE", 0x1177 : "Silicon Engineering", 0x1178 : "Alfa Inc", 0x1179 : "Toshiba corporation", 0x117A : "A-Trend Technology", 0x117B : "LG (Lucky Goldstar) Electronics Inc.", 0x117C : "Atto Technology", 0x117D : "Becton &amp; Dickinson", 0x117E : "T/R Systems", 0x117F : "Integrated Circuit Systems", 0x1180 : "RicohCompany,Ltd.", 0x1183 : "Fujikura Ltd", 0x1184 : "Forks Inc", 0x1185 : "Dataworld", 0x1186 : "D-Link System Inc", 0x1187 : "Philips Healthcare", 0x1188 : "Shima Seiki Manufacturing Ltd.", 0x1189 : "Matsushita Electronics", 0x118A : "Hilevel Technology", 0x118B : "Hypertec Pty Ltd", 0x118C : "Corollary Inc", 0x118D : "BitFlow Inc", 0x118E : "Hermstedt AG", 0x118F : "Green Logic", 0x1190 : "Tripace", 0x1191 : "Acard Technology Corp.", 0x1192 : "Densan Co. Ltd", 0x1194 : "Toucan Technology", 0x1195 : "Ratoc System Inc", 0x1196 : "Hytec Electronics Ltd", 0x1197 : "Gage Applied Technologies", 0x1198 : "Lambda Systems Inc", 0x1199 : "Attachmate Corp.", 0x119A : "Mind/Share Inc.", 0x119B : "Omega Micro Inc.", 0x119C : "Information Technology Inst.", 0x119D : "Bug Sapporo Japan", 0x119E : "Fujitsu Microelectronics Ltd.", 0x119F : "Bull Hn Information Systems", 0x11A1 : "Hamamatsu Photonics K.K.", 0x11A2 : "Sierra Research and Technology", 0x11A3 : "Deuretzbacher GmbH &amp; Co. Eng. KG", 0x11A4 : "Barco", 0x11A5 : "MicroUnity Systems Engineering Inc.", 0x11A6 : "Pure Data", 0x11A7 : "Power Computing Corp.", 0x11A8 : "Systech Corp.", 0x11A9 : "InnoSys Inc.", 0x11AA : "Actel", 0x11AB : "Marvell Semiconductor", 0x11AC : "Canon Information Systems", 0x11AD : "Lite-On Technology Corp.", 0x11AE : "Scitex Corporation Ltd", 0x11AF : "Avid Technology, Inc.", 0x11B0 : "Quicklogic Corp", 0x11B1 : "Apricot Computers", 0x11B2 : "Eastman Kodak", 0x11B3 : "Barr Systems Inc.", 0x11B4 : "Leitch Technology International", 0x11B5 : "Radstone Technology Ltd.", 0x11B6 : "United Video Corp", 0x11B7 : "Motorola", 0x11B8 : "Xpoint Technologies Inc", 0x11B9 : "Pathlight Technology Inc.", 0x11BA : "Videotron Corp", 0x11BB : "Pyramid Technology", 0x11BC : "Network Peripherals Inc", 0x11BD : "Pinnacle system", 0x11BE : "International Microcircuits Inc", 0x11BF : "Astrodesign Inc.", 0x11C1 : "LSI Corporation", 0x11C2 : "Sand Microelectronics", 0x11C4 : "Document Technologies Ind.", 0x11C5 : "Shiva Corporatin", 0x11C6 : "Dainippon Screen Mfg. Co", 0x11C7 : "D.C.M. 
Data Systems", 0x11C8 : "Dolphin Interconnect Solutions", 0x11C9 : "MAGMA", 0x11CA : "LSI Systems Inc", 0x11CB : "Specialix International Ltd.", 0x11CC : "Michels &amp; Kleberhoff Computer GmbH", 0x11CD : "HAL Computer Systems Inc.", 0x11CE : "Primary Rate Inc", 0x11CF : "Pioneer Electronic Corporation", 0x11D0 : "BAE SYSTEMS - Manassas", 0x11D1 : "AuraVision Corporation", 0x11D2 : "Intercom Inc.", 0x11D3 : "Trancell Systems Inc", 0x11D4 : "Analog Devices, Inc.", 0x11D5 : "Tahoma Technology", 0x11D6 : "Tekelec Technologies", 0x11D7 : "TRENTON Technology, Inc.", 0x11D8 : "Image Technologies Development", 0x11D9 : "Tec Corporation", 0x11DA : "Novell", 0x11DB : "Sega Enterprises Ltd", 0x11DC : "Questra Corp", 0x11DD : "Crosfield Electronics Ltd", 0x11DE : "Zoran Corporation", 0x11E1 : "Gec Plessey Semi Inc", 0x11E2 : "Samsung Information Systems America", 0x11E3 : "Quicklogic Corp", 0x11E4 : "Second Wave Inc", 0x11E5 : "IIX Consulting", 0x11E6 : "Mitsui-Zosen System Research", 0x11E8 : "Digital Processing Systems Inc", 0x11E9 : "Highwater Designs Ltd", 0x11EA : "Elsag Bailey", 0x11EB : "Formation, Inc", 0x11EC : "Coreco Inc", 0x11ED : "Mediamatics", 0x11EE : "Dome Imaging Systems Inc", 0x11EF : "Nicolet Technologies BV", 0x11F0 : "Triya", 0x11F2 : "Picture Tel Japan KK", 0x11F3 : "Keithley Instruments, Inc", 0x11F4 : "Kinetic Systems Corporation", 0x11F5 : "Computing Devices Intl", 0x11F6 : "Powermatic Data Systems Ltd", 0x11F7 : "Scientific Atlanta", 0x11F8 : "PMC-Sierra Inc.", 0x11F9 : "I-Cube Inc", 0x11FA : "Kasan Electronics Co Ltd", 0x11FB : "Datel Inc", 0x11FD : "High Street Consultants", 0x11FE : "Comtrol Corp", 0x11FF : "Scion Corp", 0x1200 : "CSS Corp", 0x1201 : "Vista Controls Corp", 0x1202 : "Network General Corp", 0x1203 : "Bayer Corporation Agfa Div", 0x1204 : "Lattice Semiconductor Corp", 0x1205 : "Array Corp", 0x1206 : "Amdahl Corp", 0x1208 : "Parsytec GmbH", 0x1209 : "Sci Systems Inc", 0x120A : "Synaptel", 0x120B : "Adaptive Solutions", 0x120D : "Compression Labs Inc.", 0x120E : "Cyclades Corporation", 0x120F : "Essential Communications", 0x1210 : "Hyperparallel Technologies", 0x1211 : "Braintech Inc", 0x1213 : "Applied Intelligent Systems Inc", 0x1214 : "Performance Technologies Inc", 0x1215 : "Interware Co Ltd", 0x1216 : "Purup-Eskofot A/S", 0x1217 : "O2Micro Inc", 0x1218 : "Hybricon Corp", 0x1219 : "First Virtual Corp", 0x121A : "3dfx Interactive Inc", 0x121B : "Advanced Telecommunications Modules", 0x121C : "Nippon Texa Co Ltd", 0x121D : "LiPPERT Embedded Computers GmbH", 0x121E : "CSPI", 0x121F : "Arcus Technology Inc", 0x1220 : "Ariel Corporation", 0x1221 : "Contec Microelectronics Europe BV", 0x1222 : "Ancor Communications Inc", 0x1223 : "Artesyn Embedded Technologies", 0x1224 : "Interactive Images", 0x1225 : "Power I/O Inc.", 0x1227 : "Tech-Source", 0x1228 : "Norsk Elektro Optikk A/S", 0x1229 : "Data Kinesis Inc.", 0x122A : "Integrated Telecom", 0x122B : "LG Industrial Systems Co. 
Ltd.", 0x122C : "sci-worx GmbH", 0x122D : "Aztech System Ltd", 0x122E : "Absolute Analysis", 0x122F : "Andrew Corp.", 0x1230 : "Fishcamp Engineering", 0x1231 : "Woodward McCoach Inc.", 0x1233 : "Bus-Tech Inc.", 0x1234 : "Technical Corp", 0x1236 : "Sigma Designs, Inc", 0x1237 : "Alta Technology Corp.", 0x1238 : "Adtran", 0x1239 : "The 3DO Company", 0x123A : "Visicom Laboratories Inc.", 0x123B : "Seeq Technology Inc.", 0x123C : "Century Systems Inc.", 0x123D : "Engineering Design Team Inc.", 0x123F : "C-Cube Microsystems", 0x1240 : "Marathon Technologies Corp.", 0x1241 : "DSC Communications", 0x1242 : "JNI Corporation", 0x1243 : "Delphax", 0x1244 : "AVM AUDIOVISUELLES MKTG &amp; Computer GmbH", 0x1245 : "APD S.A.", 0x1246 : "Dipix Technologies Inc", 0x1247 : "Xylon Research Inc.", 0x1248 : "Central Data Corp.", 0x1249 : "Samsung Electronics Co. Ltd.", 0x124A : "AEG Electrocom GmbH", 0x124C : "Solitron Technologies Inc.", 0x124D : "Stallion Technologies", 0x124E : "Cylink", 0x124F : "Infortrend Technology Inc", 0x1250 : "Hitachi Microcomputer System Ltd.", 0x1251 : "VLSI Solution OY", 0x1253 : "Guzik Technical Enterprises", 0x1254 : "Linear Systems Ltd.", 0x1255 : "Optibase Ltd.", 0x1256 : "Perceptive Solutions Inc.", 0x1257 : "Vertex Networks Inc.", 0x1258 : "Gilbarco Inc.", 0x1259 : "Allied Telesyn International", 0x125A : "ABB Power Systems", 0x125B : "Asix Electronics Corp.", 0x125C : "Aurora Technologies Inc.", 0x125D : "ESS Technology", 0x125E : "Specialvideo Engineering SRL", 0x125F : "Concurrent Technologies Inc.", 0x1260 : "Intersil Corporation", 0x1261 : "Matsushita-Kotobuki Electronics Indu", 0x1262 : "ES Computer Co. Ltd.", 0x1263 : "Sonic Solutions", 0x1264 : "Aval Nagasaki Corp.", 0x1265 : "Casio Computer Co. Ltd.", 0x1266 : "Microdyne Corp.", 0x1267 : "S.A. Telecommunications", 0x1268 : "Tektronix", 0x1269 : "Thomson-CSF/TTM", 0x126A : "Lexmark International Inc.", 0x126B : "Adax Inc.", 0x126C : "Nortel Networks Corp.", 0x126D : "Splash Technology Inc.", 0x126E : "Sumitomo Metal Industries Ltd.", 0x126F : "Silicon Motion", 0x1270 : "Olympus Optical Co. Ltd.", 0x1271 : "GW Instruments", 0x1272 : "themrtaish", 0x1273 : "Hughes Network Systems", 0x1274 : "Ensoniq", 0x1275 : "Network Appliance", 0x1276 : "Switched Network Technologies Inc.", 0x1277 : "Comstream", 0x1278 : "Transtech Parallel Systems", 0x1279 : "Transmeta Corp.", 0x127B : "Pixera Corp", 0x127C : "Crosspoint Solutions Inc.", 0x127D : "Vela Research LP", 0x127E : "Winnov L.P.", 0x127F : "Fujifilm", 0x1280 : "Photoscript Group Ltd.", 0x1281 : "Yokogawa Electronic Corp.", 0x1282 : "Davicom Semiconductor Inc.", 0x1283 : "Waldo", 0x1285 : "Platform Technologies Inc.", 0x1286 : "MAZeT GmbH", 0x1287 : "LuxSonor Inc.", 0x1288 : "Timestep Corp.", 0x1289 : "AVC Technology Inc.", 0x128A : "Asante Technologies Inc.", 0x128B : "Transwitch Corp.", 0x128C : "Retix Corp.", 0x128D : "G2 Networks Inc.", 0x128F : "Tateno Dennou Inc.", 0x1290 : "Sord Computer Corp.", 0x1291 : "NCS Computer Italia", 0x1292 : "Tritech Microelectronics Intl PTE", 0x1293 : "Media Reality Technology", 0x1294 : "Rhetorex Inc.", 0x1295 : "Imagenation Corp.", 0x1296 : "Kofax Image Products", 0x1297 : "Shuttle Computer", 0x1298 : "Spellcaster Telecommunications Inc.", 0x1299 : "Knowledge Technology Laboratories", 0x129A : "Curtiss Wright Controls Electronic Systems", 0x129B : "Image Access", 0x129D : "CompCore Multimedia Inc.", 0x129E : "Victor Co. 
of Japan Ltd.", 0x129F : "OEC Medical Systems Inc.", 0x12A0 : "Allen Bradley Co.", 0x12A1 : "Simpact Inc", 0x12A2 : "NewGen Systems Corp.", 0x12A3 : "Lucent Technologies AMR", 0x12A4 : "NTT Electronics Corp.", 0x12A5 : "Vision Dynamics Ltd.", 0x12A6 : "Scalable Networks Inc.", 0x12A7 : "AMO GmbH", 0x12A8 : "News Datacom", 0x12A9 : "Xiotech Corp.", 0x12AA : "SDL Communications Inc.", 0x12AB : "Yuan Yuan Enterprise Co. Ltd.", 0x12AC : "MeasureX Corp.", 0x12AD : "MULTIDATA GmbH", 0x12AE : "Alteon Networks Inc.", 0x12AF : "TDK USA Corp.", 0x12B0 : "Jorge Scientific Corp.", 0x12B1 : "GammaLink", 0x12B2 : "General Signal Networks", 0x12B3 : "Interface Corp. Ltd.", 0x12B4 : "Future Tel Inc.", 0x12B5 : "Granite Systems Inc.", 0x12B7 : "Acumen", 0x12B8 : "Korg", 0x12B9 : "3Com Corporation", 0x12BA : "Bittware, Inc", 0x12BB : "Nippon Unisoft Corp.", 0x12BC : "Array Microsystems", 0x12BD : "Computerm Corp.", 0x12BF : "Fujifilm Microdevices", 0x12C0 : "Infimed", 0x12C1 : "GMM Research Corp.", 0x12C2 : "Mentec Ltd.", 0x12C3 : "Holtek Microelectronics Inc.", 0x12C4 : "Connect Tech Inc.", 0x12C5 : "Picture Elements Inc.", 0x12C6 : "Mitani Corp.", 0x12C7 : "Dialogic Corp.", 0x12C8 : "G Force Co. Ltd.", 0x12C9 : "Gigi Operations", 0x12CA : "Integrated Computing Engines, Inc.", 0x12CB : "Antex Electronics Corp.", 0x12CC : "Pluto Technologies International", 0x12CD : "Aims Lab", 0x12CE : "Netspeed Inc.", 0x12CF : "Prophet Systems Inc.", 0x12D0 : "GDE Systems Inc.", 0x12D1 : "Huawei Technologies Co., Ltd.", 0x12D3 : "Vingmed Sound A/S", 0x12D4 : "Ulticom, Inc.", 0x12D5 : "Equator Technologies", 0x12D6 : "Analogic Corp.", 0x12D7 : "Biotronic SRL", 0x12D8 : "Pericom Semiconductor", 0x12D9 : "Aculab Plc.", 0x12DA : "TrueTime", 0x12DB : "Annapolis Micro Systems Inc.", 0x12DC : "Symicron Computer Communication Ltd.", 0x12DD : "Management Graphics Inc.", 0x12DE : "Rainbow Technologies", 0x12DF : "SBS Technologies Inc.", 0x12E0 : "Chase Research PLC", 0x12E1 : "Nintendo Co. Ltd.", 0x12E2 : "Datum Inc. Bancomm-Timing Division", 0x12E3 : "Imation Corp. 
- Medical Imaging Syst", 0x12E4 : "Brooktrout Technology Inc.", 0x12E6 : "Cirel Systems", 0x12E7 : "Sebring Systems Inc", 0x12E8 : "CRISC Corp.", 0x12E9 : "GE Spacenet", 0x12EB : "Aureal Semiconductor", 0x12EC : "3A International Inc.", 0x12ED : "Optivision Inc.", 0x12EE : "Orange Micro, Inc.", 0x12EF : "Vienna Systems", 0x12F0 : "Pentek", 0x12F1 : "Sorenson Vision Inc.", 0x12F2 : "Gammagraphx Inc.", 0x12F4 : "Megatel", 0x12F5 : "Forks", 0x12F7 : "Cognex", 0x12F8 : "Electronic-Design GmbH", 0x12F9 : "FourFold Technologies", 0x12FB : "Spectrum Signal Processing", 0x12FC : "Capital Equipment Corp", 0x12FE : "esd Electronic System Design GmbH", 0x1303 : "Innovative Integration", 0x1304 : "Juniper Networks Inc.", 0x1307 : "ComputerBoards", 0x1308 : "Jato Technologies Inc.", 0x130A : "Mitsubishi Electric Microcomputer", 0x130B : "Colorgraphic Communications Corp", 0x130F : "Advanet Inc.", 0x1310 : "Gespac", 0x1312 : "Microscan Systems Inc", 0x1313 : "Yaskawa Electric Co.", 0x1316 : "Teradyne Inc.", 0x1317 : "ADMtek Inc", 0x1318 : "Packet Engines, Inc.", 0x1319 : "Forte Media", 0x131F : "SIIG", 0x1325 : "austriamicrosystems", 0x1326 : "Seachange International", 0x1328 : "CIFELLI SYSTEMS CORPORATION", 0x1331 : "RadiSys Corporation", 0x1332 : "Curtiss-Wright Controls Embedded Computing", 0x1335 : "Videomail Inc.", 0x133D : "Prisa Networks", 0x133F : "SCM Microsystems", 0x1342 : "Promax Systems Inc", 0x1344 : "Micron Technology, Inc.", 0x1347 : "Spectracom Corporation", 0x134A : "DTC Technology Corp.", 0x134B : "ARK Research Corp.", 0x134C : "Chori Joho System Co. Ltd", 0x134D : "PCTEL Inc.", 0x135A : "Brain Boxes Limited", 0x135B : "Giganet Inc.", 0x135C : "Quatech Inc", 0x135D : "ABB Network Partner AB", 0x135E : "Sealevel Systems Inc.", 0x135F : "I-Data International A-S", 0x1360 : "Meinberg Funkuhren GmbH &amp; Co. KG", 0x1361 : "Soliton Systems K.K.", 0x1363 : "Phoenix Technologies Ltd", 0x1365 : "Hypercope Corp.", 0x1366 : "Teijin Seiki Co. Ltd.", 0x1367 : "Hitachi Zosen Corporation", 0x1368 : "Skyware Corporation", 0x1369 : "Digigram", 0x136B : "Kawasaki Steel Corporation", 0x136C : "Adtek System Science Co Ltd", 0x1375 : "Boeing - Sunnyvale", 0x137A : "Mark Of The Unicorn Inc", 0x137B : "PPT Vision", 0x137C : "Iwatsu Electric Co Ltd", 0x137D : "Dynachip Corporation", 0x137E : "Patriot Scientific Corp.", 0x1380 : "Sanritz Automation Co LTC", 0x1381 : "Brains Co. Ltd", 0x1382 : "Marian - Electronic &amp; Software", 0x1384 : "Stellar Semiconductor Inc", 0x1385 : "Netgear", 0x1387 : "Curtiss-Wright Controls Electronic Systems", 0x1388 : "Hitachi Information Technology Co Ltd", 0x1389 : "Applicom International", 0x138A : "Validity Sensors, Inc.", 0x138B : "Tokimec Inc", 0x138E : "Basler GMBH", 0x138F : "Patapsco Designs Inc", 0x1390 : "Concept Development Inc.", 0x1393 : "Moxa Technologies Co Ltd", 0x1394 : "Level One Communications", 0x1395 : "Ambicom Inc", 0x1396 : "Cipher Systems Inc", 0x1397 : "Cologne Chip Designs GmbH", 0x1398 : "Clarion Co. 
Ltd", 0x139A : "Alacritech Inc", 0x139D : "Xstreams PLC/ EPL Limited", 0x139E : "Echostar Data Networks", 0x13A0 : "Crystal Group Inc", 0x13A1 : "Kawasaki Heavy Industries Ltd", 0x13A3 : "HI-FN Inc.", 0x13A4 : "Rascom Inc", 0x13A7 : "amc330", 0x13A8 : "Exar Corp.", 0x13A9 : "Siemens Healthcare", 0x13AA : "Nortel Networks - BWA Division", 0x13AF : "T.Sqware", 0x13B1 : "Tamura Corporation", 0x13B4 : "Wellbean Co Inc", 0x13B5 : "ARM Ltd", 0x13B6 : "DLoG Gesellschaft fr elektronische Datentechnik mbH", 0x13B8 : "Nokia Telecommunications OY", 0x13BD : "Sharp Corporation", 0x13BF : "Sharewave Inc", 0x13C0 : "Microgate Corp.", 0x13C1 : "LSI", 0x13C2 : "Technotrend Systemtechnik GMBH", 0x13C3 : "Janz Computer AG", 0x13C7 : "Blue Chip Technology Ltd", 0x13CC : "Metheus Corporation", 0x13CF : "Studio Audio &amp; Video Ltd", 0x13D0 : "B2C2 Inc", 0x13D1 : "AboCom Systems, Inc", 0x13D4 : "Graphics Microsystems Inc", 0x13D6 : "K.I. Technology Co Ltd", 0x13D7 : "Toshiba Engineering Corporation", 0x13D8 : "Phobos Corporation", 0x13D9 : "Apex Inc", 0x13DC : "Netboost Corporation", 0x13DE : "ABB Robotics Products AB", 0x13DF : "E-Tech Inc.", 0x13E0 : "GVC Corporation", 0x13E3 : "Nest Inc", 0x13E4 : "Calculex Inc", 0x13E5 : "Telesoft Design Ltd", 0x13E9 : "Intraserver Technology Inc", 0x13EA : "Dallas Semiconductor", 0x13F0 : "IC Plus Corporation", 0x13F1 : "OCE - Industries S.A.", 0x13F4 : "Troika Networks Inc", 0x13F6 : "C-Media Electronics Inc.", 0x13F9 : "NTT Advanced Technology Corp.", 0x13FA : "Pentland Systems Ltd.", 0x13FB : "Aydin Corp", 0x13FD : "Micro Science Inc", 0x13FE : "Advantech Co., Ltd.", 0x13FF : "Silicon Spice Inc.", 0x1400 : "ArtX Inc", 0x1402 : "Meilhaus Electronic GmbH Germany", 0x1404 : "Fundamental Software Inc", 0x1406 : "Oce Print Logics Technologies S.A.", 0x1407 : "Lava Computer MFG Inc.", 0x1408 : "Aloka Co. Ltd", 0x1409 : "SUNIX Co., Ltd.", 0x140A : "DSP Research Inc", 0x140B : "Ramix Inc", 0x140D : "Matsushita Electric Works Ltd", 0x140F : "Salient Systems Corp", 0x1412 : "IC Ensemble, Inc.", 0x1413 : "Addonics", 0x1415 : "Oxford Semiconductor Ltd - now part of PLX Technology ", 0x1418 : "Kyushu Electronics Systems Inc", 0x1419 : "Excel Switching Corp", 0x141B : "Zoom Telephonics Inc", 0x141E : "Fanuc Co. 
Ltd", 0x141F : "Visiontech Ltd", 0x1420 : "Psion Dacom PLC", 0x1425 : "Chelsio Communications", 0x1428 : "Edec Co Ltd", 0x1429 : "Unex Technology Corp.", 0x142A : "Kingmax Technology Inc", 0x142B : "Radiolan", 0x142C : "Minton Optic Industry Co Ltd", 0x142D : "Pixstream Inc", 0x1430 : "ITT Aerospace/Communications Division", 0x1433 : "Eltec Elektronik AG", 0x1435 : "RTD Embedded Technologies, Inc.", 0x1436 : "CIS Technology Inc", 0x1437 : "Nissin Inc Co", 0x1438 : "Atmel-Dream", 0x143F : "Lightwell Co Ltd - Zax Division", 0x1441 : "Agie SA.", 0x1443 : "Unibrain S.A.", 0x1445 : "Logical Co Ltd", 0x1446 : "Graphin Co., LTD", 0x1447 : "Aim GMBH", 0x1448 : "Alesis Studio", 0x144A : "ADLINK Technology Inc", 0x144B : "Loronix Information Systems, Inc.", 0x144D : "sanyo", 0x1450 : "Octave Communications Ind.", 0x1451 : "SP3D Chip Design GMBH", 0x1453 : "Mycom Inc", 0x1458 : "Giga-Byte Technologies", 0x145C : "Cryptek", 0x145F : "Baldor Electric Company", 0x1460 : "Dynarc Inc", 0x1462 : "Micro-Star International Co Ltd", 0x1463 : "Fast Corporation", 0x1464 : "Interactive Circuits &amp; Systems Ltd", 0x1468 : "Ambit Microsystems Corp.", 0x1469 : "Cleveland Motion Controls", 0x146C : "Ruby Tech Corp.", 0x146D : "Tachyon Inc.", 0x146E : "WMS Gaming", 0x1471 : "Integrated Telecom Express Inc", 0x1473 : "Zapex Technologies Inc", 0x1474 : "Doug Carson &amp; Associates", 0x1477 : "Net Insight", 0x1478 : "Diatrend Corporation", 0x147B : "Abit Computer Corp.", 0x147F : "Nihon Unisys Ltd.", 0x1482 : "Isytec - Integrierte Systemtechnik Gmbh", 0x1483 : "Labway Coporation", 0x1485 : "Erma - Electronic GMBH", 0x1489 : "KYE Systems Corporation", 0x148A : "Opto 22", 0x148B : "Innomedialogic Inc.", 0x148C : "C.P. Technology Co. Ltd", 0x148D : "Digicom Systems Inc.", 0x148E : "OSI Plus Corporation", 0x148F : "Plant Equipment Inc.", 0x1490 : "TC Labs Pty Ltd.", 0x1491 : "Futronic ", 0x1493 : "Maker Communications", 0x1495 : "Tokai Communications Industry Co. Ltd", 0x1496 : "Joytech Computer Co. 
Ltd.", 0x1497 : "SMA Technologie AG", 0x1498 : "Tews Technologies", 0x1499 : "Micro-Technology Co Ltd", 0x149A : "Andor Technology Ltd", 0x149B : "Seiko Instruments Inc", 0x149E : "Mapletree Networks Inc.", 0x149F : "Lectron Co Ltd", 0x14A0 : "Softing AG", 0x14A2 : "Millennium Engineering Inc", 0x14A4 : "GVC/BCM Advanced Research", 0x14A9 : "Hivertec Inc.", 0x14AB : "Mentor Graphics Corp.", 0x14B1 : "Nextcom K.K.", 0x14B3 : "Xpeed Inc.", 0x14B4 : "Philips Business Electronics B.V.", 0x14B5 : "Creamware GmbH", 0x14B6 : "Quantum Data Corp.", 0x14B7 : "Proxim Inc.", 0x14B9 : "Aironet Wireless Communication", 0x14BA : "Internix Inc.", 0x14BB : "Semtech Corporation", 0x14BE : "L3 Communications", 0x14C0 : "Compal Electronics, Inc.", 0x14C1 : "Myricom Inc.", 0x14C2 : "DTK Computer", 0x14C4 : "Iwasaki Information Systems Co Ltd", 0x14C5 : "ABB AB (Sweden)", 0x14C6 : "Data Race Inc", 0x14C7 : "Modular Technology Ltd.", 0x14C8 : "Turbocomm Tech Inc", 0x14C9 : "Odin Telesystems Inc", 0x14CB : "Billionton Systems Inc./Cadmus Micro Inc", 0x14CD : "Universal Scientific Ind.", 0x14CF : "TEK Microsystems Inc.", 0x14D4 : "Panacom Technology Corporation", 0x14D5 : "Nitsuko Corporation", 0x14D6 : "Accusys Inc", 0x14D7 : "Hirakawa Hewtech Corp", 0x14D8 : "Hopf Elektronik GMBH", 0x14D9 : "Alpha Processor Inc", 0x14DB : "Avlab Technology Inc.", 0x14DC : "Amplicon Liveline Limited", 0x14DD : "Imodl Inc.", 0x14DE : "Applied Integration Corporation", 0x14E3 : "Amtelco", 0x14E4 : "Broadcom", 0x14EA : "Planex Communications, Inc.", 0x14EB : "Seiko Epson Corporation", 0x14EC : "Acqiris", 0x14ED : "Datakinetics Ltd", 0x14EF : "Carry Computer Eng. Co Ltd", 0x14F1 : "Conexant", 0x14F2 : "Mobility Electronics, Inc.", 0x14F4 : "Tokyo Electronic Industry Co. Ltd.", 0x14F5 : "Sopac Ltd", 0x14F6 : "Coyote Technologies LLC", 0x14F7 : "Wolf Technology Inc", 0x14F8 : "Audiocodes Inc", 0x14F9 : "AG Communications", 0x14FB : "Transas Marine (UK) Ltd", 0x14FC : "Quadrics Ltd", 0x14FD : "Silex Technology Inc.", 0x14FE : "Archtek Telecom Corp.", 0x14FF : "Twinhead International Corp.", 0x1501 : "Banksoft Canada Ltd", 0x1502 : "Mitsubishi Electric Logistics Support Co", 0x1503 : "Kawasaki LSI USA Inc", 0x1504 : "Kaiser Electronics", 0x1506 : "Chameleon Systems Inc", 0x1507 : "Htec Ltd.", 0x1509 : "First International Computer Inc", 0x150B : "Yamashita Systems Corp", 0x150C : "Kyopal Co Ltd", 0x150D : "Warpspped Inc", 0x150E : "C-Port Corporation", 0x150F : "Intec GMBH", 0x1510 : "Behavior Tech Computer Corp", 0x1511 : "Centillium Technology Corp", 0x1512 : "Rosun Technologies Inc", 0x1513 : "Raychem", 0x1514 : "TFL LAN Inc", 0x1515 : "ICS Advent", 0x1516 : "Myson Technology Inc", 0x1517 : "Echotek Corporation", 0x1518 : "Kontron Modular Computers GmbH (PEP Modular Computers GMBH)", 0x1519 : "Telefon Aktiebolaget LM Ericsson", 0x151A : "Globetek Inc.", 0x151B : "Combox Ltd", 0x151C : "Digital Audio Labs Inc", 0x151D : "Fujitsu Computer Products Of America", 0x151E : "Matrix Corp.", 0x151F : "Topic Semiconductor Corp", 0x1520 : "Chaplet System Inc", 0x1521 : "Bell Corporation", 0x1522 : "Mainpine Limited", 0x1523 : "Music Semiconductors", 0x1524 : "ENE Technology Inc", 0x1525 : "Impact Technologies", 0x1526 : "ISS Inc", 0x1527 : "Solectron", 0x1528 : "Acksys", 0x1529 : "American Microsystems Inc", 0x152A : "Quickturn Design Systems", 0x152B : "Flytech Technology Co Ltd", 0x152C : "Macraigor Systems LLC", 0x152D : "Quanta Computer Inc", 0x152E : "Melec Inc", 0x152F : "Philips - Crypto", 0x1532 : "Echelon Corporation", 0x1533 : 
"Baltimore", 0x1534 : "Road Corporation", 0x1535 : "Evergreen Technologies Inc", 0x1537 : "Datalex Communcations", 0x1538 : "Aralion Inc.", 0x1539 : "Atelier Informatiques et Electronique Et", 0x153A : "ONO Sokki", 0x153B : "Terratec Electronic GMBH", 0x153C : "Antal Electronic", 0x153D : "Filanet Corporation", 0x153E : "Techwell Inc", 0x153F : "MIPS Technologies, Inc", 0x1540 : "Provideo Multimedia Co Ltd", 0x1541 : "Telocity Inc.", 0x1542 : "Vivid Technology Inc", 0x1543 : "Silicon Laboratories", 0x1544 : "DCM Technologies Ltd.", 0x1545 : "VisionTek", 0x1546 : "IOI Technology Corp.", 0x1547 : "Mitutoyo Corporation", 0x1548 : "Jet Propulsion Laboratory", 0x1549 : "Interconnect Systems Solutions", 0x154A : "Max Technologies Inc.", 0x154B : "Computex Co Ltd", 0x154C : "Visual Technology Inc.", 0x154D : "PAN International Industrial Corp", 0x154E : "Servotest Ltd", 0x154F : "Stratabeam Technology", 0x1550 : "Open Network Co Ltd", 0x1551 : "Smart Electronic Development GMBH", 0x1553 : "Chicony Electronics Co Ltd", 0x1554 : "Prolink Microsystems Corp.", 0x1555 : "Gesytec GmbH", 0x1556 : "PLDA", 0x1557 : "Mediastar Co. Ltd", 0x1558 : "Clevo/Kapok Computer", 0x1559 : "SI Logic Ltd", 0x155A : "Innomedia Inc", 0x155B : "Protac International Corp", 0x155C : "s", 0x155D : "MAC System Co Ltd", 0x155E : "KUKA Roboter GmbH", 0x155F : "Perle Systems Limited", 0x1560 : "Terayon Communications Systems", 0x1561 : "Viewgraphics Inc", 0x1562 : "Symbol Technologies, Inc.", 0x1563 : "A-Trend Technology Co Ltd", 0x1564 : "Yamakatsu Electronics Industry Co Ltd", 0x1565 : "Biostar Microtech Intl Corp", 0x1566 : "Ardent Technologies Inc", 0x1567 : "Jungsoft", 0x1568 : "DDK Electronics Inc", 0x1569 : "Palit Microsystems Inc", 0x156A : "Avtec Systems Inc", 0x156B : "S2io Inc", 0x156C : "Vidac Electronics GMBH", 0x156D : "Alpha-Top Corp", 0x156E : "Alfa Inc.", 0x156F : "M-Systems Flash Disk Pioneers Ltd", 0x1570 : "Lecroy Corporation", 0x1571 : "Contemporary Controls", 0x1572 : "Otis Elevator Company", 0x1573 : "Lattice - Vantis", 0x1574 : "Fairchild Semiconductor", 0x1575 : "Voltaire Advanced Data Security Ltd", 0x1576 : "Viewcast Com", 0x1578 : "Hitt", 0x1579 : "Dual Technology Corporation", 0x157A : "Japan Elecronics Ind. 
Inc", 0x157B : "Star Multimedia Corp.", 0x157C : "Eurosoft (UK)", 0x157D : "Gemflex Networks", 0x157E : "Transition Networks", 0x157F : "PX Instruments Technology Ltd", 0x1580 : "Primex Aerospace Co.", 0x1581 : "SEH Computertechnik GMBH", 0x1582 : "Cytec Corporation", 0x1583 : "Inet Technologies Inc", 0x1584 : "Vetronix Corporation Engenharia Ltda", 0x1585 : "Marconi Commerce Systems SRL", 0x1586 : "Lancast Inc", 0x1587 : "Konica Corporation", 0x1588 : "Solidum Systems Corp", 0x1589 : "Atlantek Microsystems Pty Ltd", 0x158A : "Digalog Systems Inc", 0x158B : "Allied Data Technologies", 0x158C : "Hitachi Semiconductor &amp; Devices Sales Co", 0x158D : "Point Multimedia Systems", 0x158E : "Lara Technology Inc", 0x158F : "Ditect Coop", 0x1590 : "3pardata Inc.", 0x1591 : "ARN", 0x1592 : "Syba Tech Ltd.", 0x1593 : "Bops Inc", 0x1594 : "Netgame Ltd", 0x1595 : "Diva Systems Corp.", 0x1596 : "Folsom Research Inc", 0x1597 : "Memec Design Services", 0x1598 : "Granite Microsystems", 0x1599 : "Delta Electronics Inc", 0x159A : "General Instrument", 0x159B : "Faraday Technology Corp", 0x159C : "Stratus Computer Systems", 0x159D : "Ningbo Harrison Electronics Co Ltd", 0x159E : "A-Max Technology Co Ltd", 0x159F : "Galea Network Security", 0x15A0 : "Compumaster SRL", 0x15A1 : "Geocast Network Systems Inc", 0x15A2 : "Catalyst Enterprises Inc", 0x15A3 : "Italtel", 0x15A4 : "X-Net OY", 0x15A5 : "Toyota MACS Inc", 0x15A6 : "Sunlight Ultrasound Technologies Ltd", 0x15A7 : "SSE Telecom Inc", 0x15A8 : "Shanghai Communications Technologies Cen", 0x15AA : "Moreton Bay", 0x15AB : "Bluesteel Networks Inc", 0x15AC : "North Atlantic Instruments", 0x15AD : "VMware Inc.", 0x15AE : "Amersham Pharmacia Biotech", 0x15B0 : "Zoltrix International Limited", 0x15B1 : "Source Technology Inc", 0x15B2 : "Mosaid Technologies Inc.", 0x15B3 : "Mellanox Technology", 0x15B4 : "CCI/Triad", 0x15B5 : "Cimetrics Inc", 0x15B6 : "Texas Memory Systems Inc", 0x15B7 : "Sandisk Corp.", 0x15B8 : "Addi-Data GMBH", 0x15B9 : "Maestro Digital Communications", 0x15BA : "Impacct Technology Corp", 0x15BB : "Portwell Inc", 0x15BC : "Agilent Technologies", 0x15BD : "DFI Inc.", 0x15BE : "Sola Electronics", 0x15BF : "High Tech Computer Corp (HTC)", 0x15C0 : "BVM Limited", 0x15C1 : "Quantel", 0x15C2 : "Newer Technology Inc", 0x15C3 : "Taiwan Mycomp Co Ltd", 0x15C4 : "EVSX Inc", 0x15C5 : "Procomp Informatics Ltd", 0x15C6 : "Technical University Of Budapest", 0x15C7 : "Tateyama System Laboratory Co Ltd", 0x15C8 : "Penta Media Co. 
Ltd", 0x15C9 : "Serome Technology Inc", 0x15CA : "Bitboys OY", 0x15CB : "AG Electronics Ltd", 0x15CC : "Hotrail Inc.", 0x15CD : "Dreamtech Co Ltd", 0x15CE : "Genrad Inc.", 0x15CF : "Hilscher GMBH", 0x15D1 : "Infineon Technologies AG", 0x15D2 : "FIC (First International Computer Inc)", 0x15D3 : "NDS Technologies Israel Ltd", 0x15D4 : "Iwill Corporation", 0x15D5 : "Tatung Co.", 0x15D6 : "Entridia Corporation", 0x15D7 : "Rockwell-Collins Inc", 0x15D8 : "Cybernetics Technology Co Ltd", 0x15D9 : "Super Micro Computer Inc", 0x15DA : "Cyberfirm Inc.", 0x15DB : "Applied Computing Systems Inc.", 0x15DC : "Litronic Inc.", 0x15DD : "Sigmatel Inc.", 0x15DE : "Malleable Technologies Inc", 0x15E0 : "Cacheflow Inc", 0x15E1 : "Voice Technologies Group", 0x15E2 : "Quicknet Technologies Inc", 0x15E3 : "Networth Technologies Inc", 0x15E4 : "VSN Systemen BV", 0x15E5 : "Valley Technologies Inc", 0x15E6 : "Agere Inc.", 0x15E7 : "GET Engineering Corp.", 0x15E8 : "National Datacomm Corp.", 0x15E9 : "Pacific Digital Corp.", 0x15EA : "Tokyo Denshi Sekei K.K.", 0x15EB : "Drsearch GMBH", 0x15EC : "Beckhoff Automation GmbH", 0x15ED : "Macrolink Inc", 0x15EE : "IN Win Development Inc.", 0x15EF : "Intelligent Paradigm Inc", 0x15F0 : "B-Tree Systems Inc", 0x15F1 : "Times N Systems Inc", 0x15F2 : "SPOT Imaging Solutions a division of Diagnostic Instruments, Inc", 0x15F3 : "Digitmedia Corp.", 0x15F4 : "Valuesoft", 0x15F5 : "Power Micro Research", 0x15F6 : "Extreme Packet Device Inc", 0x15F7 : "Banctec", 0x15F8 : "Koga Electronics Co", 0x15F9 : "Zenith Electronics Co", 0x15FA : "Axzam Corporation", 0x15FB : "Zilog Inc.", 0x15FC : "Techsan Electronics Co Ltd", 0x15FD : "N-Cubed.Net", 0x15FE : "Kinpo Electronics Inc", 0x15FF : "Fastpoint Technologies Inc.", 0x1600 : "Northrop Grumman - Canada Ltd", 0x1601 : "Tenta Technology", 0x1602 : "Prosys-TEC Inc.", 0x1603 : "Nokia Wireless Business Communications", 0x1604 : "Central System Research Co Ltd", 0x1605 : "Pairgain Technologies", 0x1606 : "Europop AG", 0x1607 : "Lava Semiconductor Manufacturing Inc.", 0x1608 : "Automated Wagering International", 0x1609 : "Sciemetric Instruments Inc", 0x160A : "Kollmorgen Servotronix", 0x160B : "Onkyo Corp.", 0x160C : "Oregon Micro Systems Inc.", 0x160D : "Aaeon Electronics Inc", 0x160E : "CML Emergency Services", 0x160F : "ITEC Co Ltd", 0x1610 : "Tottori Sanyo Electric Co Ltd", 0x1611 : "Bel Fuse Inc.", 0x1612 : "Telesynergy Research Inc.", 0x1613 : "System Craft Inc.", 0x1614 : "Jace Tech Inc.", 0x1615 : "Equus Computer Systems Inc", 0x1616 : "Iotech Inc.", 0x1617 : "Rapidstream Inc", 0x1618 : "Esec SA", 0x1619 : "FarSite Communications Limited", 0x161B : "Mobilian Israel Ltd", 0x161C : "Berkshire Products", 0x161D : "Gatec", 0x161E : "Kyoei Sangyo Co Ltd", 0x161F : "Arima Computer Corporation", 0x1620 : "Sigmacom Co Ltd", 0x1621 : "Lynx Studio Technology Inc", 0x1622 : "Nokia Home Communications", 0x1623 : "KRF Tech Ltd", 0x1624 : "CE Infosys GMBH", 0x1625 : "Warp Nine Engineering", 0x1626 : "TDK Semiconductor Corp.", 0x1627 : "BCom Electronics Inc", 0x1629 : "Kongsberg Spacetec a.s.", 0x162A : "Sejin Computerland Co Ltd", 0x162B : "Shanghai Bell Company Limited", 0x162C : "C&amp;H Technologies Inc", 0x162D : "Reprosoft Co Ltd", 0x162E : "Margi Systems Inc", 0x162F : "Rohde &amp; Schwarz GMBH &amp; Co KG", 0x1630 : "Sky Computers Inc", 0x1631 : "NEC Computer International", 0x1632 : "Verisys Inc", 0x1633 : "Adac Corporation", 0x1634 : "Visionglobal Network Corp.", 0x1635 : "Decros / S.ICZ a.s.", 0x1636 : "Jean Company Ltd", 0x1637 : "NSI", 
0x1638 : "Eumitcom Technology Inc", 0x163A : "Air Prime Inc", 0x163B : "Glotrex Co Ltd", 0x163C : "intel", 0x163D : "Heidelberg Digital LLC", 0x163E : "3dpower", 0x163F : "Renishaw PLC", 0x1640 : "Intelliworxx Inc", 0x1641 : "MKNet Corporation", 0x1642 : "Bitland", 0x1643 : "Hajime Industries Ltd", 0x1644 : "Western Avionics Ltd", 0x1645 : "Quick-Serv. Computer Co. Ltd", 0x1646 : "Nippon Systemware Co Ltd", 0x1647 : "Hertz Systemtechnik GMBH", 0x1648 : "MeltDown Systems LLC", 0x1649 : "Jupiter Systems", 0x164A : "Aiwa Co. Ltd", 0x164C : "Department Of Defense", 0x164D : "Ishoni Networks", 0x164E : "Micrel Inc.", 0x164F : "Datavoice (Pty) Ltd.", 0x1650 : "Admore Technology Inc.", 0x1651 : "Chaparral Network Storage", 0x1652 : "Spectrum Digital Inc.", 0x1653 : "Nature Worldwide Technology Corp", 0x1654 : "Sonicwall Inc", 0x1655 : "Dazzle Multimedia Inc.", 0x1656 : "Insyde Software Corp", 0x1657 : "Brocade Communications Systems", 0x1658 : "Med Associates Inc.", 0x1659 : "Shiba Denshi Systems Inc.", 0x165A : "Epix Inc.", 0x165B : "Real-Time Digital Inc.", 0x165C : "Kondo Kagaku", 0x165D : "Hsing Tech. Enterprise Co. Ltd.", 0x165E : "Hyunju Computer Co. Ltd.", 0x165F : "Comartsystem Korea", 0x1660 : "Network Security Technologies Inc. (NetSec)", 0x1661 : "Worldspace Corp.", 0x1662 : "Int Labs", 0x1663 : "Elmec Inc. Ltd.", 0x1664 : "Fastfame Technology Co. Ltd.", 0x1665 : "Edax Inc.", 0x1666 : "Norpak Corporation", 0x1667 : "CoSystems Inc.", 0x1668 : "Actiontec Electronics Inc.", 0x166A : "Komatsu Ltd.", 0x166B : "Supernet Inc.", 0x166C : "Shade Ltd.", 0x166D : "Sibyte Inc.", 0x166E : "Schneider Automation Inc.", 0x166F : "Televox Software Inc.", 0x1670 : "Rearden Steel", 0x1671 : "Atan Technology Inc.", 0x1672 : "Unitec Co. Ltd.", 0x1673 : "pctel", 0x1675 : "Square Wave Technology", 0x1676 : "Emachines Inc.", 0x1677 : "Bernecker + Rainer", 0x1678 : "INH Semiconductor", 0x1679 : "Tokyo Electron Device Ltd.", 0x167F : "iba AG", 0x1680 : "Dunti Corp.", 0x1681 : "Hercules", 0x1682 : "PINE Technology, Ltd.", 0x1688 : "CastleNet Technology Inc.", 0x168A : "Utimaco Safeware AG", 0x168B : "Circut Assembly Corp.", 0x168C : "Atheros Communications Inc.", 0x168D : "NMI Electronics Ltd.", 0x168E : "Hyundai MultiCAV Computer Co. Ltd.", 0x168F : "KDS Innotech Corp.", 0x1690 : "NetContinuum, Inc.", 0x1693 : "FERMA", 0x1695 : "EPoX Computer Co., Ltd.", 0x16AE : "SafeNet Inc.", 0x16B3 : "CNF Mobile Solutions", 0x16B8 : "Sonnet Technologies, Inc.", 0x16CA : "Cenatek Inc.", 0x16CB : "Minolta Co. Ltd.", 0x16CC : "Inari Inc.", 0x16D0 : "Systemax", 0x16E0 : "Third Millenium Test Solutions, Inc.", 0x16E5 : "Intellon Corporation", 0x16EC : "U.S. 
Robotics", 0x16F0 : "LaserLinc Inc.", 0x16F1 : "Adicti Corp.", 0x16F3 : "Jetway Information Co., Ltd", 0x16F6 : "VideoTele.com Inc.", 0x1700 : "Antara LLC", 0x1701 : "Interactive Computer Products Inc.", 0x1702 : "Internet Machines Corp.", 0x1703 : "Desana Systems", 0x1704 : "Clearwater Networks", 0x1705 : "Digital First", 0x1706 : "Pacific Broadband Communications", 0x1707 : "Cogency Semiconductor Inc.", 0x1708 : "Harris Corp.", 0x1709 : "Zarlink Semiconductor", 0x170A : "Alpine Electronics Inc.", 0x170B : "NetOctave Inc.", 0x170C : "YottaYotta Inc.", 0x170D : "SensoMotoric Instruments GmbH", 0x170E : "San Valley Systems, Inc.", 0x170F : "Cyberdyne Inc.", 0x1710 : "Pelago Networks", 0x1711 : "MyName Technologies, Inc.", 0x1712 : "NICE Systems Inc.", 0x1713 : "TOPCON Corp.", 0x1725 : "Vitesse Semiconductor", 0x1734 : "Fujitsu-Siemens Computers GmbH", 0x1737 : "LinkSys", 0x173B : "Altima Communications Inc.", 0x1743 : "Peppercon AG", 0x174B : "PC Partner Limited", 0x1752 : "Global Brands Manufacture Ltd.", 0x1753 : "TeraRecon, Inc.", 0x1755 : "Alchemy Semiconductor Inc.", 0x176A : "General Dynamics Canada", 0x1775 : "General Electric", 0x1789 : "Ennyah Technologies Corp", 0x1793 : "Unitech Electronics Co., Ltd", 0x17A1 : "Tascorp", 0x17A7 : "Start Network Technology Co., Ltd.", 0x17AA : "Legend Ltd. (Beijing)", 0x17AB : "Phillips Components", 0x17AF : "Hightech Information Systems, Ltd.", 0x17BE : "Philips Semiconductors", 0x17C0 : "Wistron Corp.", 0x17C4 : "Movita", 0x17CC : "NetChip", 0x17cd : "Cadence Design Systems", 0x17D5 : "Neterion Inc.", 0x17db : "Cray, Inc.", 0x17E9 : "DH electronics GmbH / Sabrent", 0x17EE : "Connect Components, Ltd.", 0x17F3 : "RDC Semiconductor Co., Ltd.", 0x17FE : "INPROCOMM", 0x1813 : "Ambient Technologies Inc", 0x1814 : "Ralink Technology, Corp.", 0x1815 : "devolo AG", 0x1820 : "InfiniCon Systems, Inc.", 0x1824 : "Avocent", 0x1841 : "Panda Platinum", 0x1860 : "Primagraphics Ltd.", 0x186C : "Humusoft S.R.O", 0x1887 : "Elan Digital Systems Ltd", 0x1888 : "Varisys Limited", 0x188D : "Millogic Ltd.", 0x1890 : "Egenera, Inc.", 0x18BC : "Info-Tek Corp.", 0x18C9 : "ARVOO Engineering BV", 0x18CA : "XGI Technology Inc", 0x18F1 : "Spectrum Systementwicklung Microelectronic GmbH", 0x18F4 : "Napatech A/S", 0x18F7 : "Commtech, Inc.", 0x18FB : "Resilience Corporation", 0x1904 : "Ritmo", 0x1905 : "WIS Technology, Inc.", 0x1910 : "Seaway Networks", 0x1912 : "Renesas Electronics", 0x1931 : "Option NV", 0x1941 : "Stelar", 0x1954 : "One Stop Systems, Inc.", 0x1969 : "Atheros Communications", 0x1971 : "AGEIA Technologies, Inc.", 0x197B : "JMicron Technology Corp.", 0x198a : "Nallatech", 0x1991 : "Topstar Digital Technologies Co., Ltd.", 0x19a2 : "ServerEngines", 0x19A8 : "DAQDATA GmbH", 0x19AC : "Kasten Chase Applied Research", 0x19B6 : "Mikrotik", 0x19E2 : "Vector Informatik GmbH", 0x19E3 : "DDRdrive LLC", 0x1A08 : "Linux Networx", 0x1a41 : "Tilera Corporation", 0x1A42 : "Imaginant", 0x1B13 : "Jaton Corporation USA", 0x1B21 : "Asustek - ASMedia Technology Inc.", 0x1B6F : "Etron", 0x1B73 : "Fresco Logic Inc.", 0x1B91 : "Averna", 0x1BAD : "ReFLEX CES", 0x1C0F : "Monarch Innovative Technologies Pvt Ltd's ", 0x1C32 : "Highland Technology, Inc.", 0x1c39 : "Thomson Video Networks", 0x1DE1 : "Tekram", 0x1FCF : "Miranda Technologies Ltd.", 0x2001 : "Temporal Research Ltd", 0x2646 : "Kingston Technology Co.", 0x270F : "ChainTek Computer Co. Ltd.", 0x2EC1 : "Zenic Inc", 0x3388 : "Hint Corp.", 0x3411 : "Quantum Designs (H.K.) 
Inc.", 0x3513 : "ARCOM Control Systems Ltd.", 0x38EF : "4links", 0x3D3D : "3Dlabs, Inc. Ltd", 0x4005 : "Avance Logic Inc.", 0x4144 : "Alpha Data", 0x416C : "Aladdin Knowledge Systems", 0x4348 : "wch.cn", 0x4680 : "UMAX Computer Corp.", 0x4843 : "Hercules Computer Technology", 0x4943 : "Growth Networks", 0x4954 : "Integral Technologies", 0x4978 : "Axil Computer Inc.", 0x4C48 : "Lung Hwa Electronics", 0x4C53 : "SBS-OR Industrial Computers", 0x4CA1 : "Seanix Technology Inc", 0x4D51 : "Mediaq Inc.", 0x4D54 : "Microtechnica Co Ltd", 0x4DDC : "ILC Data Device Corp.", 0x4E8 : "Samsung Windows Portable Devices", 0x5053 : "TBS/Voyetra Technologies", 0x508A : "Samsung T10 MP3 Player", 0x5136 : "S S Technologies", 0x5143 : "Qualcomm Inc. USA", 0x5333 : "S3 Graphics Co., Ltd", 0x544C : "Teralogic Inc", 0x5555 : "Genroco Inc.", 0x5853 : "Citrix Systems, Inc.", 0x6409 : "Logitec Corp.", 0x6666 : "Decision Computer International Co.", 0x6945 : "ASMedia Technology Inc.", 0x7604 : "O.N. Electric Co. Ltd.", 0x7d1 : "D-Link Corporation", 0x8080 : "Xirlink, Inc", 0x8086 : "Intel Corporation", 0x8087 : "Intel", 0x80EE : "Oracle Corporation - InnoTek Systemberatung GmbH", 0x8866 : "T-Square Design Inc.", 0x8888 : "Silicon Magic", 0x8E0E : "Computone Corporation", 0x9004 : "Adaptec Inc", 0x9005 : "Adaptec Inc", 0x919A : "Gigapixel Corp", 0x9412 : "Holtek", 0x9699 : "Omni Media Technology Inc.", 0x9710 : "MosChip Semiconductor Technology", 0x9902 : "StarGen, Inc.", 0xA0A0 : "Aopen Inc.", 0xA0F1 : "Unisys Corporation", 0xA200 : "NEC Corp.", 0xA259 : "Hewlett Packard", 0xA304 : "Sony", 0xA727 : "3com Corporation", 0xAA42 : "Abekas, Inc", 0xAC1E : "Digital Receiver Technology Inc", 0xB1B3 : "Shiva Europe Ltd.", 0xB894 : "Brown &amp; Sharpe Mfg. Co.", 0xBEEF : "Mindstream Computing", 0xC001 : "TSI Telsys", 0xC0A9 : "Micron/Crucial Technology", 0xC0DE : "Motorola", 0xC0FE : "Motion Engineering Inc.", 0xC622 : "Hudson Soft Co Ltd", 0xCA50 : "Varian, Inc", 0xCAFE : "Chrysalis-ITS", 0xCCCC : "Catapult Communications", 0xD4D4 : "Curtiss-Wright Controls Embedded Computing", 0xDC93 : "Dawicontrol", 0xDEAD : "Indigita Corporation", 0xDEAF : "Middle Digital, Inc", 0xE159 : "Tiger Jet Network Inc", 0xE4BF : "EKF Elektronik GMBH", 0xEA01 : "Eagle Technology", 0xEABB : "Aashima Technology B.V.", 0xEACE : "Endace Measurement Systems Ltd.", 0xECC0 : "Echo Digital Audio Corporation", 0xEDD8 : "ARK Logic, Inc", 0xF5F5 : "F5 Networks Inc.", 0xFA57 : "Interagon AS", } DEVICES = { 0x0033 : { 0x002F : "Spitfire VGA Accelerator", 0x0033 : "Spitfire VGA Accelerator", }, 0x003D : { 0x003a : "i740pci", 0x003d : "1740pci", 0x00D1 : "i740 PCI", }, 0x0070 : { 0x6800 : "Hauppage Nova -TD-500 DVB-T Tuner Device", 0x6800 : "Hauppage Nova -TD-500 DVB-T Tuner Device", }, 0x0402 : { 0x1050 : "ethernet controller", 0x5606 : "0x4752", 0x8086 : "video controller", 0x9665 : "ZCT8YBT'", }, 0x046D : { 0x0805 : "n.a.", 0x0808 : "Logitech Webcam C600", 0x0809 : "Webcam Pro 9000", 0x082B : "n/a", 0x0896 : "Camera", 0x08AD : "Quickcam Communicate STX", 0x08AF : "-", 0x08b2 : "logitech QuickCam Pro 4000", 0x08C6 : "Logitech OEM Webcam", 0x08f0 : "n/a", 0x08F6 : "QuickCam Communicate", 0x092F : "model number: V-UAP9", 0x0A0B : "Logitech ClearChat Pro USB", 0x0A1F : "Logitech G930 Headset", 0x5a61 : "", 0xC018 : "Baesline 3 Button Corded Optical Mouse", 0xC045 : "Epoxy Hidden", 0xC046 : "n/a", 0xc05b : "ftht", 0xC063 : "DELL 6-Button mouse", 0xC226 : "n/a", 0xC227 : "n/a", 0xC22D : "n/a", 0xC281 : "Wingman Force J-UA9", 0xC312 : "n/a", 0xC404 : "Logitech 
TrackMan Wheel", 0xC50E : " C-BS35", 0xC512 : "n/a", 0xC51B : "n/a", 0xc51e : "Unknown", 0xC526 : "n/a", 0xC52A : "HID Keyboard Device", 0xC52B : "USB Receiver for Wireless Mouse", 0xC52E : "USB3 receiver", 0xC52F : "Logitech Wireless Mouse USB Controller", }, 0x0483 : { 0x2016 : "Driver Windows xp", }, 0x04A9 : { 0x314D : "unknown", }, 0x04B3 : { 0x24D5 : "Audio Controller", 0x401 : "PCIVEN_8086&DEV_293E&SUBSYS_20F217AA&REV_033&B1BFB68&0&D8 ", 0x401 : "PCIVEN_8086&DEV_24C6&REV_033 267A616A", 0x4010 : "PCIVEN_10EC&DEV_5227&SUBSYS_220C17AA&REV_01", 0x9876 : "PCIVEN_8086&DEV_1C3A&SUBSYS_1C3A1458&REV_04", }, 0x04D9 : { 0x1603 : "Samsung", 0x2011 : "n/a", }, 0x04F2 : { 0xb008 : ".oem44.inf", 0xB044 : "Webcam", 0xB175 : "SN", 0xB217 : "Integrated Camera", 0xB307 : "Webcam", }, 0x051D : { 0x0002 : "Intel (R) Core [TM] 2 Duo CPU", 0x051D : "0x051d", 0x9876 : "USBVID_0B05&PID_580FE6AZCY495152", }, 0x0553 : { 0x0200 : " Aiptek USA", }, 0x058f : { 0x0001 : "AM usb storage", 0x0107 : "0x01 ", 0x1234 : "6387", 0x1600 : "http://www.alldatasheet.com/datasheet-pdf/pdf/91600/ETC/AU9254A21.html", 0x6362 : "Unknown 4-in-1 card reader (istar)", 0x6366 : "Multi Flash Reader USB Device", 0x6387 : "USB Mass Storage Device(USB1005B Flash Disk)", 0x8CBA : "PCI Simple Communications Controller", 0x9254 : "http://www.alldatasheet.com/datasheet-pdf/pdf/91600/ETC/AU9254A21.html", 0x9380 : "Micron=MT29F32G08CBABA", 0x9540 : "SmartCard Reader", }, 0x0590 : { 0x0028 : "hid device class blood pressure monitor", }, 0x05ac : { 0x021e : "Aluminium Keyboard IT USB", 0x1293 : "Apple iPod 5", 0x1297 : "Apple iPhone 4", 0x8215 : "Broadcom BCM2046 Bluetooth chipset iMac", }, 0x05E1 : { 0x0408 : "USB 2.0 Video Capture Controller", 0x0501 : "web cam", }, 0x064e : { 0x064e : "Suyin", 0xa101 : "Acer Crystal Eye Webcam", 0xa103 : "WebCam", 0xa116 : "USB 2.0 UVC 0.3M WebCam", 0xA219 : "SUYIN 1.3M WebCam", 0xB250 : "1.3M HD WebCam", 0xc108 : "its a webcam software", 0xd101 : "Web Cam", 0xD217 : "HP TrueVision HD", }, 0x067B : { 0x2303 : "Prolific USB 2 Serial Comm Port controller", 0x2305 : "USB-to-Printer Bridge Controller", 0x2393 : "prolificz", 0x2506 : "Hi-Speed USB to IDE Bridge Controller", 0x25a1 : "Prolific PCLinq3 USB Transfer Cable Driver", 0x9876 : "TES", }, 0x06FE : { 0x9700 : "a netcard used usb interface", }, 0x093a : { 0x2468 : "http://genius.ru/products.aspx?pnum=24948&archive=1", 0x2600 : "http://www.speedlink.com/support/bin/24-02-2012/SETUP.rar", 0x2608 : "USBVID_093A&PID_2608&REV_0100&MI_00", 0x2620 : "WEBCAM http://www.canyon-tech.com/archive/voip/webcams/CNR-WCAM53#pr-switcher", }, 0x096E : { 0x0201 : " ", }, 0x0A5C : { 0x0201 : "Broadcom USB iLine10(tm) Network Adapter", 0x10DE : "SM controller", 0x2000 : "Broadcom Bluetooth Firmware Upgrade Device", 0x2009 : "Broadcom Bluetooth Controller", 0x200a : "Broadcom Bluetooth Controller", 0x200f : "Broadcom 802.11ac Network Adapter ", 0x201d : "BROADCOM Bluetooth Device", 0x201e : "IBM Integrated Bluetooth IV", 0x2020 : "Broadcom Bluetooth Dongle", 0x2021 : "BCM2035B3 ROM Adapter Generic", 0x2033 : "Broadcom Blutonium Device Firmware Downloader", 0x2035 : "BCM92035NMD Bluetooth", 0x2038 : "Broadcom Blutonium Device Firmware Downloader (BCM2038)", 0x2039 : "BROADCOM Bluetooth Device", 0x2045 : "Broadcom Bluetooth Controller", 0x2046 : "Broadcom USB Bluetooth Device", 0x2047 : "Broadcom USB Bluetooth Device", 0x205e : "Broadcom Bluetooth Firmware Upgrade Device", 0x2100 : 
"Broadcom Bluetooth 2.0+eDR USB dongle", 0x2101 : "Broadcom Bluetooth 2.0+EDR USB dongle", 0x2102 : "ANYCOM Blue USB-200/250", 0x2110 : "Broadcom Bluetooth Controller", 0x2111 : "ANYCOM Blue USB-UHE 200/250", 0x2120 : "Broadcom 2045 Bluetooth 2.0 USB-UHE Device with trace filter", 0x2121 : "Broadcom 2045 Bluetooth 2.0 USB Device with trace filter", 0x2122 : "Broadcom Bluetooth 2.0+EDR USB dongle", 0x2124 : "2045B3ROM Bluetooth Dongle", 0x2130 : "Broadcom 2045 Bluetooth 2.0 USB-UHE Device with trace filter", 0x2131 : "Broadcom 2045 Bluetooth 2.0 USB Device with trace filter", 0x2140 : "2046 Flash UHE Class 2", 0x2141 : "2046 Flash non UHE Class 2", 0x2142 : "2046 Flash non UHE Class 1", 0x2143 : "2046 Flash non UHE Class 1", 0x2144 : "2046 Flash non UHE module Class 2", 0x2145 : "Broadcom BCM9204MD LENO Module", 0x2146 : "Broadcom 2045 Bluetooth 2.1 USB UHE Dongle", 0x2147 : "Broadcom 2046 Bluetooth 2.1 USB Dongle", 0x2148 : "Broadcom 2046 Bluetooth 2.1 USB UHE Dongle", 0x2149 : "Broadcom 2046 Bluetooth 2.1 USB Dongle", 0x214a : "Broadcom 2046 Bluetooth 2.1 USB Module", 0x214b : "Broadcom 2046 Bluetooth 2.1 USB Module", 0x214c : "Broadcom 2046 Bluetooth 2.1 USB Module", 0x214d : "Broadcom Bluetooth 2.1 UHE Module", 0x214e : "Thinkpad Bluetooth with Enhanced Data Rate II", 0x214f : "Broadcom 2046 Bluetooth 2.1 USB UHE Dongle", 0x2150 : "Broadcom 2046 Bluetooth 2.1 USB Dongle", 0x2151 : "Broadcom Bluetooth 2.1 USB Dongle", 0x2152 : "Broadcom 2046 Bluetooth 2.1 USB UHE Dongle", 0x2153 : "Broadcom 2046 Bluetooth 2.1 USB UHE Dongle", 0x2154 : "Broadcom 2046 Bluetooth 2.1 USB UHE Dongle", 0x2155 : "Broadcom Bluetooth USB Dongle", 0x2157 : "BCM2046 B1 USB 500", 0x2158 : "Broadcom 2046 Bluetooth 2.1 Device", 0x219C : "Broadcom BCM2070 Bluetooth 3.0+HS USB Device ", 0x21E1 : ".0112", 0x21E3 : "Broadcom Bluetooth 4.0", 0x4500 : "Broadcom 2046 Bluetooth 2.1 USB Dongle", 0x4502 : "Broadcom 2046 Bluetooth 2.1 USB Dongle", 0x4503 : "Broadcom 2046 Bluetooth 2.1 USB Dongle", 0x5800 : "Unified Security Hub", 0x5801 : "Unified Security Hub ", 0x6300 : "Pirelli ISB Remote NDIS Device", 0x6688 : "NVIDIA GeForce GT 240M", 0x8613 : "TD 3104 USB vedio grabber ", 0x9876 : "0x9876", }, 0x0A92 : { 0x1010 : "1010&REV_0101&MI_00", }, 0x0AC8 : { 0x1234 : "1", 0x6719 : "8086&DEV_0F18&SUBSYS_16DD1043&REV_0E", 0x9876 : "1", }, 0x0b05 : { 0x170C : " RFHID", }, 0x0c45 : { 0x0C45 : "USB2.0", 0x1111 : "USB webcam", 0x5243 : "xda exec Uknown device", 0x6007 : "Genius WebCam Eye", 0x600D : "USB(v1.1) webcam", 0x602C : "home made", 0x602D : "USB Webcam", 0x6030 : "USB WebCam ", 0x610C : "usb web camera ", 0x6128 : "USB &#1074;&#1077;&#1073;-&#1082;&#1072;&#1084;&#1077;&#1088;&#1072;", 0x6128 : "USB PC Camera Plus", 0x6129 : "USB WebCam", 0x6130 : "USB HUB", 0x613A : "USB WEBCAM", 0x613c : "USB Webcam", 0x613E : "USB Camera", 0x624f : "Integrated Webcam in Compal HEL81 series barebones.", 0x6270 : "USB Microscopr", 0x6270 : "webcam with mic link works for win 7", 0x6270 : "webcam", 0x627F : "USBVID_17A1&PID_0118&REV_0100", 0x62B3 : "USB 2.0 PC Camera", 0x62BF : "USBVid_0c45&Pid_62bf&Rev_0100", 0x62c0 : "Sonix Webcam", 0x6353 : "USB Microscope", 0x641D : "1.3 MPixel Integrated Webcam used in Dell N5010 series", 0x6421 : "USB 2.0 Webcam slim 32", 0x642F : "Webcam", 0x644b : "oc45&oid 641d& 9:07&mi oo", 0x6489 : "Integrated Webcam Universal Serial Bus controllers", 0x6840 : "sonix 1.3 mp laptop integrated webcam", 0x7401 : "RDing TEMPer1V1.4", 0x9876 : "webcam", }, 0x0cf3 : { 0x1002 : "Wireless USB 2.0 adapter TL-WN821N", 0x3000 : 
"USBVID_0000&PID_00006&F763642&0&4", 0x3002 : "USBVID_0CF3&PID_3002&REV_0001", 0x3002 : "unknown", 0x3005 : "Atheros Bluetooth Module", 0x9271 : "TP-LINK 150 Mbps Wireless Lite N Adapter TL-WN721N", }, 0x0D8C : { 0x000E : "00", 0x0102 : "6206lc", 0x5200 : "0x5200", }, 0x0DF6 : { 0x9071 : "t9071t WL-113 - Wireless Network USB dongle 54g ", }, 0x0E11 : { 0x0001 : "PCI to EISA Bridge", 0x0002 : "D2ufG0 <a href=", 0x000F : "StorageWorks Library Adapter (HVD)", 0x0012 : "686P7", 0x0046 : "Smart Array 6400 Controller", 0x0049 : "Gigabit Upgrade Module", 0x004A : "Gigabit Server Adapter", 0x005A : "HP Remote Insight Lights-Out II Board", 0x00B1 : "HP Remote Insight Lights-Out II PCI Device", 0x00C0 : "64Bit", 0x0508 : "PCI UTP/STP Controller", 0x1000 : "Pentium Bridge", 0x2000 : "Pentium Bridge", 0x3032 : "n9XQeH <a href=", 0x3033 : "HTdOCe <a href=", 0x3034 : "GUI Accelerator", 0x4000 : "Pentium Bridge", 0x6010 : "HotPlug PCI Bridge", 0x7020 : "USB Controller", 0xA0EC : "Original Compaq fibre Channel HBA", 0xA0F0 : "Advanced System Management Controller", 0xA0F3 : "Triflex PCI to ISA PnP Bridge", 0xA0F7 : " device 4", 0xA0F8 : "USB Open Host Controller", 0xA0FC : "Tachyon TL 64-bit/66-Mhz FC HBA", 0xAe10 : "Smart-2 Array Controller", 0xAE29 : "PCI to ISA Bridge", 0xAE2A : "CPU to PCI Bridge", 0xAE2B : "PCI to ISA PnP Bridge", 0xAE31 : "System Management Controller", 0xAE32 : "Netelligent 10/100 TX PCI UTP TLAN 2.3", 0xAE33 : "Dual EIDE Controller", 0xAE34 : "Netelligent 10 T PCI UTP TLAN 2.3", 0xAE35 : "Integrated NetFlex 3/P TLAN 2.3", 0xAE40 : "Dual Port Netelligent 10/100 TX PCI TLAN", 0xAE43 : "Integrated Netelligent 10/100 TX PCI", 0xAE69 : "PCI to ISA Bridge", 0xAE6C : "PCI Bridge", 0xAE6D : "CPU to PCI Bridge", 0xB011 : "Dual Port Netelligent 10/100 TX", 0xB012 : "UTP/Coax PCI", 0xB01E : "Fast Ethernet NIC", 0xB01F : "Fast Ethernet NIC", 0xB02F : "Ethernet NIC", 0xB030 : "10/100TX Embedded UTP/Coax Controller", 0xB04A : "10/100TX WOL UTP Controller", 0XB060 : "SMART2 Array Controller", 0xB0C6 : "Fast Ethernet Embedded Controller w/ WOL", 0xB0C7 : "Fast Ethernet NIC", 0xB0D7 : "Fast Ethernet NIC", 0xB0DD : "Fast Ethernet NIC", 0xB0DE : "Fast Ethernet NIC", 0xB0DF : "Gigabit Module", 0xB0E0 : "Gigabit Module", 0xB0E1 : "Fast Ethernet Module", 0xB123 : "Gigabit NIC", 0xB134 : "Fast Ethernet NIC", 0xB13C : "Fast Ethernet NIC", 0xB144 : "Fast Ethernet NIC", 0xB163 : "Fast Ethernet NIC", 0xB164 : "Fast Ethernet Upgrade Module", 0xB178 : "SMART2 Array Controller", 0xB196 : "Conexant SoftK56 Modem", 0xB1A4 : "Gigabit Server Adapter", 0xB203 : "Integrated Lights Out Processor", 0xB204 : "Integrated Lights Out Processor", 0xF095 : "HP StorageWorks 2 Gb", 0xF130 : "ThunderLAN 1.0 NetFlex-3/P", 0xF150 : "ThunderLAN 2.3 NetFlex-3/P with BNC", 0xF700 : "LP7000 Compaq/Emulex Fibre Channel HBA", 0xF800 : "LP8000 Compaq/Emulex Fibre Channel HBA", }, 0x0E8D : { 0x0002 : "PCI Simple Communications Controller / Mobiles China", 0x0003 : "usb", }, 0x1000 : { 0x0001 : "PCI-SCSI I/O Processor", 0x0002 : "Fast-wide SCSI gg", 0x0003 : "PCI to SCSI I/O Processor", 0x0004 : "SCSI raid controllers", 0x0005 : "Fast SCSI", 0x0006 : "PCI to Ultra SCSI I/O Processor", 0x000A : "PCI Dual Channel Wide Ultra2 SCSI Ctrlr", 0x000B : "PCI Dual Channel Wide Ultra2 SCSI Ctrlr", 0x000C : "PCI to Ultra2 SCSI I/O Processor", 0x000D : "Ultra Wide SCSI", 0x000F : "PCI to Ultra SCSI I/O Processor", 0x0010 : "I2O-Ready PCI RAID Ultra2 SCSI Ctrlr", 0x0012 : "PCI to Ultra2 SCSI Controller", 0x0013 : "PCI to Ultra SCSI Controller", 0x0020 
: "PCI to Dual Channel Ultra3 SCSI Ctrlr", 0x0021 : "PCI to Ultra160 SCSI Controller", 0x0030 : "PCI-X to Ultra320 SCSI Controller", 0x0031 : "PCI-X SCSI Controller", 0x0032 : "PCI-X to Ultra320 SCSI Controller", 0x0035 : "PCI-X SCSI Controller", 0x0040 : "PCI-X to Ultra320 SCSI Controller", 0x0050 : "LSISAS1068E / LSI SAS 6i RAID Controller", 0x0054 : "LSI Adapter SAS 3000 series 8-port with 1068", 0x0056 : "PCI-Express Fusion-MPT SAS", 0x0058 : "PCI-Express Fusion-MPT SAS", 0x005B : "LSI Logic / Symbios Logic MegaRAID SAS 2208 [Thunderbolt] [1000:005b] (rev 05)", 0x005e : "PCI-X Fusion-MPT SAS", 0x0060 : "0x10f9", 0x0062 : "PCI-Express Fusion-MPT SAS", 0x0064 : "PCI-Express Fusion-MPT SAS 2.0", 0x0072 : "Dell PERC H200", 0x0073 : "IBM ServeRAID M1015", 0x0080 : "PCI-Express Fusion-MPT SAS 2.0/2.5", 0x008F : "LSI 53C8xx SCSI host adapter chip", 0x0097 : "SAS3008 PCI-Express Fusion-MPT SAS-3", 0x0408 : "U320-2E Raid Controller", 0x0621 : "Fibre Channel I/O Processor", 0x0622 : "Dial Channel Fibre Channel I/O Processor", 0x0623 : "Dual Channel Fibre Channel I/O Processor", 0x0624 : "Fibre Channel I/O Processor", 0x0625 : "Fibre Channel I/O Processor", 0x0626 : "Fibre Channel Adapter", 0x0628 : "Fibre Channel Adapter", 0x0630 : "Fibre Channel I/O Processor", 0x0640 : "Fibre Channel Adapter", 0x0642 : "Fibre Channel Adapter", 0x0646 : "Fibre Channel Adapter", 0x0701 : "10/100 MBit Ethernet", 0x0702 : "Gigabit Ethernet Controller", 0x0901 : "USB Controller", 0x1000 : "Fast SCSI Controller", 0x1001 : "Symbios Ultra2 SCSI controller", 0x1010 : "Single channel SCSI controller", 0x1020 : "LSI Logic MegaRAID 320-1 Dell PowerEdge PERC 4/SC", 0x1960 : "RAID Controller", 0x3050 : "PCI-Express Fusion-MPT SAS 2.0", 0x9876 : "5946504E44383243", }, 0x1001 : { 0x0010 : "PCI 1616", 0x0011 : "OPTO-PCI", 0x0012 : "PCI-AD", 0x0013 : "PCI-OptoRel", 0x0014 : "Timer", 0x0015 : "PCI-DAC416", 0x0016 : "PCI-MFB high-speed analog I/O", 0x0017 : "PROTO-3 PCI", 0x0020 : "Universal digital I/O PCI-Interface", }, 0x1002 : { 0x4370 : "RV370", 0x6760 : "6470M", 0x0000 : "{4D36E972-E325-11CE-BFC1-08002BE10318}0016", 0x0002 : "EMU10K1", 0x000D : "bhjkh", 0x0180 : "LXPAY0Y001926158A92000 ", 0x0300 : "1002", 0x0B12 : "R580", 0x1002 : "0x675d pcie", 0x1002 : "RV360", 0x1028 : "R9 200", 0x1043 : "RV410", 0x11 : "0x215r2qzua21", 0x1111 : "ATI Technologies Inc. 
/ Advanced Micro Devices", 0x1202 : "1202", 0x1313 : "Q770", 0x1314 : "1314", 0x1400 : "1400", 0x1401 : "1401", 0x1402 : "1402", 0x1403 : "1403", 0x1404 : "1404", 0x1405 : "1405", 0x1410 : "1410", 0x1417 : "1417", 0x1419 : "1419", 0x1631 : "0000", 0x1714 : "A4-3400", 0x1ab8 : "2", 0x3150 : "M24", 0x3151 : "RV380", 0x3152 : "M24", 0x3154 : "M24GL", 0x3171 : "RV380", 0x3E50 : "PCIVEN_1002&DEV_68A8&SUBSYS_159B103C&REV_00", 0x3E54 : "RV380GL", 0x3E70 : "RV380", 0x3E74 : "RV380GL", 0x4136 : "A3", 0x4137 : "RS200", 0x4143 : "9550", 0x4144 : "R300", 0x4145 : "R300", 0x4146 : "R300", 0x4147 : "R300GL", 0x4148 : "R350", 0x4149 : "R350", 0x4150 : "RV_00", 0x4151 : "RV350", 0x4152 : "RV360", 0x4153 : "RV350", 0x4154 : "RV350GL", 0x4155 : "RV350", 0x4158 : "AA01", 0x4164 : "R300", 0x4166 : "R300", 0x4167 : "R300GL", 0x4168 : "R350", 0x4169 : "R350", 0x4170 : "RV_00", 0x4171 : "RV350", 0x4172 : "REV_00", 0x4173 : "4484", 0x4174 : "RV350GL", 0x4175 : "RV350", 0x4242 : "R200AIW", 0x4243 : "", 0x4336 : "rs200", 0x4337 : "RS200M", 0x4341 : "SB200", 0x4342 : "SB200", 0x4345 : "SB200", 0x4347 : "SB200", 0x4348 : "SB200", 0x4349 : "SB200", 0x434C : "SB200", 0x434d : "SB200", 0x4353 : "SB200", 0x4354 : "215CT", 0x4358 : "113-A52021-104", 0x4361 : "ALC665", 0x4363 : "SB300", 0x4369 : "IXP 3xx", 0x436E : "IXP 3xx", 0x4370 : "SB400", 0x4371 : "IXP SB400", 0x4372 : "SMBus Controller", 0x4373 : "IXP SB400", 0x4374 : "IXP SB400", 0x4375 : "IXP SB400", 0x4376 : "SB4xx", 0x4377 : "IXP SB400", 0x4378 : "SB400", 0x4379 : "SB400 / SB450 (Sil3112)", 0x437A : "SB4xx", 0x437B : "SB450", 0x4380 : "ATI SB600", 0x4380 : "ATI SB600", 0x4380 : "ATI RS690m", 0x4381 : "ATI ?", 0x4383 : "SB700", 0x4384 : "4384", 0x4385 : "ATI RD600/RS600", 0x4386 : "690G", 0x4387 : "4387", 0x4388 : "4388", 0x4389 : "4389", 0x438A : "438A", 0x438B : "438B", 0x438C : "RD600/RS600", 0x438D : "SB600", 0x439 : "rv360", 0x4390 : "SB750", 0x4391 : "ATI SB700", 0x4391 : "AMD SB850", 0x4392 : "ATI SB700", 0x4393 : "ATI SB850", 0x4394 : "5100", 0x4396 : "210888CX", 0x4397 : "4397", 0x4398 : "SB700", 0x439C : "SB7xx", 0x439D : "SB700 LPC", 0x43A0 : "43A0", 0x43A3 : "43A3", 0x4437 : "ATI Mobility Radeon 7000 IGP", 0x4554 : "Mach64 ET", 0x4654 : "Mach64 VT", 0x4742 : "(GT-C2U2)", 0x4744 : "Rage 3D Pro AGP 2x", 0x4747 : "GT-C2U2", 0x4749 : "RAGE PRO TURBO AGP 2X", 0x474C : "Rage XC PCI-66", 0x474D : "Rage XL AGP 2x", 0x474E : "Rage XC AGP 2x", 0x474F : "Rage XL PCI-66", 0x4750 : "1039", 0x4751 : "0x1002", 0x4752 : "Rage XL PCI", 0x4753 : "Rage XC PCI", 0x4754 : "Mach 64 VT", 0x4755 : "Rage 3D II+pci", 0x4756 : "Rage 3D IIC AGP", 0x4757 : "3D 11C AGP", 0x4758 : "210888GX", 0x4759 : "215r2qzua21", 0x475A : "215r2qua12", 0x4966 : "RV250", 0x4967 : "RV250", 0x496E : "RV250", 0x496F : "RV250", 0x4A48 : "R420", 0x4a49 : "R420", 0x4A4A : "R420", 0x4a4b : "R420", 0x4A4C : "R420", 0x4A4D : "R420GL", 0x4A4E : "M18", 0x4A4F : "R420", 0x4A50 : "R420", 0x4A54 : "R420", 0x4A68 : "R420", 0x4A69 : "R420", 0x4A6A : "R420", 0x4a6b : "R420", 0x4A6C : "R420", 0x4A6D : "R420GL", 0x4A6F : "R420", 0x4A70 : "R420", 0x4A74 : "R420", 0x4B49 : "R481", 0x4B4B : "R481", 0x4B4C : "R481", 0x4B69 : "R481", 0x4B6A : "R481", 0x4B6B : "R481", 0x4B6C : "R481", 0x4C42 : "B10E0E11", 0x4C44 : "Rage 3D LT Pro AGP", 0x4C45 : "", 0x4C46 : "Mobility M3 AGP", 0x4C47 : "ati rage pro", 0x4C49 : "123", 0x4C4D : "01541014", 0x4C4E : "216lo sasa25", 0x4C50 : "unknown", 0x4C51 : "113", 0x4C52 : "1241243", 0x4C53 : "216L0SASA25", 0x4C54 : "4372", 0x4C57 : "M7 [LW]", 0x4C58 : "(0x71C1) DAC type: Internal 
DAC(400MHz) ", 0x4C59 : "ATI Mobility Radeon 7000 with 16MB (7500 with 16 o", 0x4C5A : "", 0x4C64 : "", 0x4C66 : "RV250", 0x4C6E : "0x4C6E", 0x4D46 : "ATI mobility128", 0x4D4C : "216l0sasa25", 0x4D52 : "ATI Theater 550 Pro", 0x4D53 : "TVT2 Wonder Elite", 0x4E44 : "R300", 0x4E45 : "R300", 0x4e46 : "R300", 0x4E47 : "R300GL", 0x4E48 : "R350", 0x4E49 : "R350", 0x4E4A : "R360", 0x4E4B : "R350GL", 0x4E50 : "M10", 0x4E51 : "RV350", 0x4E52 : "M10", 0x4E54 : "M10GL", 0x4E56 : "M12", 0x4E64 : "R300", 0x4E65 : "R300", 0x4e66 : "R300", 0x4E67 : "R300GL", 0x4E68 : "R350", 0x4E69 : "R350", 0x4E6A : "R360", 0x4E6B : "R350GL", 0x4E71 : "RV350", 0x5041 : "gt", 0x5042 : "rage 128 pf pro agp ", 0x5043 : "1231324445", 0x5044 : "rv100", 0x5045 : "", 0x5046 : "R128", 0x5047 : "215R3BUA22", 0x5048 : "8212104D", 0x5049 : "R128", 0x504A : "Rage 128 Pro PJ PCI", 0x504B : "Rage 128 Pro PK AGP", 0x504C : "Rage 128 Pro PL AGP", 0x504D : "Rage 128 Pro PM PCI", 0x504E : "Rage 128 Pro PN AGP", 0x504F : "Rage 128 Pro PO AGP", 0x5050 : "Scheda Grafica Standard PCI(VGA)", 0x5051 : "Rage 128 Pro PQ AGP", 0x5052 : "Rage 128 Pro PR AGP", 0x5053 : "Rage 128 Pro PS PCI", 0x5054 : "Rage 128 Pro PT AGP", 0x5055 : "rage 128 pro agp 4x tmds", 0x5056 : "Rage 128 Pro PV PCI", 0x5057 : "Rage 128 Pro PW AGP", 0x5058 : "Rage 128 Pro", 0x5144 : "Radeon 7200 QD SDR/DDR", 0x5145 : "", 0x5146 : "", 0x5147 : "", 0x5148 : "R200", 0x5149 : "", 0x514A : "", 0x514B : "", 0x514C : "R200", 0x514D : "R200", 0x514E : "", 0x514F : "", 0x5157 : "RV200", 0x5158 : "radeon 9200", 0x5159 : "RV100", 0x515A : "", 0x515E : "Radeon ES1000", 0x5168 : "ati", 0x5169 : "", 0x516A : "", 0x516B : "", 0x516C : "E7505", 0x516D : "R200", 0x5245 : "215R2QZUA21", 0x5246 : "Rage 128", 0x5247 : "Rage 32MB", 0x524B : "g01080-108", 0x524C : "", 0x5345 : "", 0x5346 : "Rage 128 SF 4x AGP 2x", 0x5347 : "", 0x5348 : "", 0x534B : "Rage 128 SK PCI", 0x534C : "Rage 128 SL AGP 2x", 0x534D : "Rage 128 SM AGP 4x", 0x534E : "Rage 128 4x", 0x5354 : "", 0x5446 : "unknown", 0x544C : "", 0x5452 : "5452", 0x5455 : "", 0x5457 : "RS200M", 0x5460 : "M22", 0x5461 : "M22", 0x5462 : "M24C", 0x5464 : "M22GL", 0x5548 : "R423", 0x5549 : "R423", 0x554A : "R423", 0x554b : "R423", 0x554D : "R430", 0x554E : "R430", 0x554F : "R430", 0x5550 : "R423GL", 0x5551 : "R423GL", 0x5568 : "R423", 0x5569 : "R423", 0x556A : "R423", 0x556B : "R423", 0x556D : "R430", 0x556E : "R430", 0x556F : "R430", 0x5570 : "R423GL", 0x5571 : "R423GL", 0x564A : "M26GL", 0x564B : "M26GL", 0x564F : "M26", 0x5652 : "M26", 0x5653 : "RV410", 0x5654 : "264VT", 0x5655 : "", 0x5656 : "Mach 64 VT4 PCI", 0x5657 : "RV410", 0x5673 : "M26", 0x5677 : "RV410", 0x5830 : "RS300", 0x5831 : "RS300", 0x5832 : "RS300", 0x5833 : "RS300M", 0x5834 : "RS300", 0x5835 : "RS300M", 0x5838 : "RS330M", 0x5854 : "RS480", 0x5874 : "RS482", 0x5940 : "RV280", 0x5941 : "RV280", 0x5950 : "RS480", 0x5954 : "RS482", 0x5955 : "RS480M", 0x5960 : "RV280", 0x5960 : "A051400005470", 0x5961 : "RV280", 0x5962 : "Rev_01", 0x5964 : "Radeon 9200", 0x5965 : "unknown", 0x5974 : "RS482", 0x5974 : "RS482", 0x5975 : "RS482M (200M)", 0x5a23 : "RD890", 0x5a31 : "RS400/133", 0x5A33 : "RC410", 0x5A3F : "5A3F", 0x5A41 : "0x5A41 ATI RADEON Xpress 1200 Series 0x1002", 0x5A41 : "RS400", 0x5A42 : "RS400M", 0x5A43 : "RS400", 0x5A60 : "SUBSYS_FF311179", 0x5A61 : "RC410", 0x5A61 : "RC410", 0x5A62 : "PCIVEN_1002&DEV_5A62&SUBSYS_2A051584&REV_00", 0x5A63 : "RC410", 0x5b60 : "RV370", 0x5b62 : "RV380x", 0x5B63 : "REV_004&178951BB&0&0008", 0x5B64 : "RV370GL", 0x5B65 : "RV370", 0x5B60 : "RV370", 0x5B70 : 
"RV380", 0x5B72 : "RV380x", 0x5B73 : "RV370", 0x5B74 : "RV370GL", 0x5B75 : "RV370", 0x5C61 : "bk-ati ver008.016m.085.006", 0x5C63 : "RV280 (M9+)", 0x5D44 : "RV280", 0x5D45 : "RV280", 0x5D48 : "M28", 0x5D49 : "M28GL", 0x5d4a : "M28", 0x5d4d : "R480", 0x5d4f : "R480", 0x5D50 : "R480GL", 0x5d52 : "R480", 0x5D57 : "R423", 0x5d6d : "R480", 0x5D6F : "R480", 0x5D70 : "R480GL", 0x5D72 : "R480", 0x5D77 : "R423", 0x5E48 : "RV410GL", 0x5E4A : "RV410", 0x5E4B : "RV410", 0x5E4C : "RV410", 0x5E4D : "RV410", 0x5E4F : "RV410", 0x5E68 : "RV410GL", 0x5E6A : "RV410", 0x5E6B : "RV410", 0x5E6C : "RV410", 0x5E6D : "RV410", 0x5E6F : "RV410", 0x6076 : "123123132", 0x6600 : "AMD", 0x6718 : "CAYMAN XT", 0x6719 : "Cayman", 0x6738 : "HD6870", 0x6739 : "Barts (Pro)", 0x673E : "0x2310", 0x6740 : "Powered by AMD", 0x6741 : "Whistler", 0x6741 : "AMD Radeon HD 7450M (6470M)&#12289;6630M&#12289;In", 0x6749 : "unknown", 0x674A : "V3900", 0x6750 : "1996", 0x6758 : "NI", 0x6759 : "1996", 0x675D : "HD7570", 0x6760 : "3305M", 0x6761 : "AMD Radeon HD 6430M", 0x6778 : "7470", 0x6779 : "AMD Radeon HD 6470m", 0x677B : "Unknown", 0x6798 : "6798", 0x6810 : "R9 200", 0x6819 : "AMD Radeon HD 7800 Series", 0x6840 : "SUBSY", 0x6841 : "subsys 1789103c", 0x6898 : "EG CYPRESS XT", 0x6899 : "EG CYPRESS PRO", 0x689C : "EG Cypress XT HEMLOCK", 0x68A0 : "EG BROADWAY XT", 0x68A1 : "EG BROADWAY PRO/LP", 0x68A8 : "AMD Radeon HD6870M (at least the one from Dell)", 0x68B0 : "EG BROADWAY XT", 0x68B8 : "EG JUNIPER XT", 0x68BA : "1482174B", 0x68BE : "EG JUNIPER LE", 0x68C1 : "DEV_68C1&SUBSYS_144A103C&REV_00", 0x68C8 : "RV830", 0x68c9 : "RV830", 0x68D8 : "Redwood", 0x68D9 : "RV830/Redwood", 0x68E0 : "HD 5470", 0x68E4 : "RV810", 0x68f9 : "Cedar", 0x700F : "A3/U1", 0x7010 : "RS200", 0x7100 : "R520", 0x7101 : "M58", 0x7102 : "PCIE", 0x7103 : "M58GL", 0x7104 : "R520GL", 0x7105 : "R520GL", 0x7106 : "M58GL", 0x7108 : "R520", 0x7109 : "R520", 0x710A : "R520", 0x710B : "R520", 0x710C : "R520", 0x710E : "R520GL", 0x710F : "R520GL", 0x7120 : "R520", 0x7124 : "R520GL", 0x7125 : "R520GL", 0x7128 : "R520", 0x7129 : "R520", 0x712A : "R520", 0x712B : "R520", 0x712C : "R520", 0x712E : "R520GL", 0x712F : "R520GL", 0x7140 : "RV515", 0x7142 : "RV515", 0x7143 : "RV515", 0x7145 : "M54", 0x7146 : "RV505", 0x7147 : "RV515", 0x7149 : "M52", 0x714A : "M52", 0x714B : "M52", 0x714C : "M52", 0x714D : "RV515", 0x714E : "RV515PCI", 0x7152 : "RV515GL", 0x7153 : "RV515GL", 0x715E : "RV515", 0x715F : "RV515", 0x7160 : "RV515", 0x7162 : "RV515", 0x7163 : "RV515", 0x7166 : "RV515", 0x7167 : "RV515", 0x716D : "RV515", 0x716E : "RV515PCI", 0x7172 : "RV515GL", 0x7173 : "RV515GL", 0x717E : "RV515", 0x717F : "RV515", 0x7180 : "RV515", 0x7181 : "RV515", 0x7183 : "RV515", 0x7186 : "M54", 0x7187 : "RV515", 0x7188 : "M64", 0x718A : "M54", 0x718B : "M52", 0x718C : "M52", 0x718D : "M54", 0x718F : "RV515PCI", 0x7193 : "RV515", 0x7196 : "M62", 0x719B : "RV515", 0x719F : "RV515", 0x71A0 : "RV515", 0x71A1 : "RV515", 0x71A3 : "RV515", 0x71A7 : "RV515", 0x71AF : "RV515PCI", 0x71B3 : "RV515", 0x71BB : "RV515", 0x71C0 : "RV530", 0x71C1 : "RV535", 0x71c2 : "RV530", 0x71C3 : "RV535", 0x71C4 : "M56GL", 0x71c5 : "M56", 0x71C5 : "M56", 0x71C6 : "RV530", 0x71C7 : "RV535", 0x71CD : "RV530", 0x71ce : "RV530", 0x71D2 : "RV530GL", 0x71D4 : "M56GL", 0x71D5 : "M56", 0x71D6 : "M56", 0x71DA : "RV530GL", 0x71DE : "M56", 0x71E0 : "RV530", 0x71E1 : "RV535", 0x71e2 : "RV530", 0x71E3 : "RV535", 0x71E6 : "RV530", 0x71E7 : "RV535", 0x71ED : "RV530", 0x71EE : "RV530", 0x71F2 : "RV530GL", 0x71FA : "RV530GL", 0x7205 : "1106", 
0x7210 : "M71", 0x7211 : "M71", 0x7240 : "R580", 0x7243 : "R580", 0x7244 : "R580", 0x7245 : "R580", 0x7246 : "R580", 0x7247 : "R580", 0x7248 : "R580", 0x7249 : "R580", 0x724A : "R580", 0x724B : "R580", 0x724C : "R580", 0x724D : "R580", 0x724E : "R580", 0x724F : "R580", 0x7260 : "R580", 0x7263 : "R580", 0x7264 : "R580", 0x7265 : "R580", 0x7266 : "R580", 0x7267 : "R580", 0x7268 : "R580", 0x7269 : "R580", 0x726A : "R580", 0x726B : "R580", 0x726C : "R580", 0x726D : "R580", 0x726E : "R580", 0x726F : "R580", 0x7280 : "R580", 0x7284 : "M58", 0x7286 : "R580", 0x7288 : "R580", 0x7291 : "R560", 0x7293 : "R580", 0x72A0 : "R580", 0x72A8 : "R580", 0x72B1 : "R580", 0x72B3 : "R580", 0x7801 : "7801", 0x7807 : "7807", 0x7808 : "7808", 0x7809 : "7809", 0x780B : "780B", 0x780D : "780D", 0x780E : "780E", 0x780F : "780F", 0x7814 : "7814", 0x7833 : "RS350", 0x79 : "unknown", 0x791 : "RS690M", 0x7910 : "7910", 0x7912 : "7912", 0x7914 : "7914", 0x7915 : "7915", 0x7916 : "7916", 0x791a : "791A", 0x791E : "RS690", 0x791F : "RS690M", 0x7912 : "SUBSYS_826D1043", 0x7937 : "Samsung R25P", 0x793F : "RS600", 0x7941 : "RS690M", 0x7942 : "RS600M", 0x796E : "RS690", 0x8086 : "1050", 0x9000 : "RV350", 0x9094 : "RV730", 0x9400 : "R600", 0x9401 : "R600", 0x9402 : "R600", 0x9403 : "R600", 0x9405 : "R600", 0x940A : "R600GL", 0x940B : "R600GL", 0x940F : "R600GL", 0x9440 : "RV770", 0x9441 : "R700", 0x9442 : "RV770", 0x9443 : "R700", 0x9444 : "RV770", 0x9446 : "RV770", 0x9447 : "R700", 0x944A : "M98", 0x944B : "M98", 0x944C : "RV770", 0x944E : "RV770", 0x9450 : "RV770", 0x9452 : "RV770", 0x9456 : "RV770", 0x945A : "M98", 0x9460 : "RV790", 0x9462 : "RV790", 0x9480 : "M96", 0x9487 : "RV730", 0x9488 : "M96", 0x948F : "RV730", 0x9490 : "RV730", 0x9491 : "M96", 0x9495 : "RV730", 0x9498 : "RV730", 0x949C : "RV730", 0x949E : "RV730", 0x949F : "RV730", 0x94A0 : "M97", 0x94A1 : "M97", 0x94A3 : "M97", 0x94B1 : "RV740", 0x94B3 : "RV740", 0x94B4 : "RV740", 0x94B5 : "AA38", 0x94C1 : "RV610-DT (Pro)", 0x94C3 : "RV610", 0x94C4 : "RV610LE", 0x94C5 : "RV610", 0x94C7 : "RV610", 0x94C8 : "M72", 0x94C9 : "M72", 0x94CB : "M72", 0x94CC : "RV610", 0x9501 : "RV670 XT", 0x9504 : "M76", 0x9505 : "RV630", 0x9506 : "M76", 0x9507 : "RV670", 0x9508 : "M76", 0x9509 : "M76", 0x950F : "R680", 0x9511 : "RV630GL", 0x9513 : "R680", 0x9515 : "RV670 AGP", 0x9519 : "RV670", 0x9540 : "RV710", 0x9541 : "RV710", 0x954E : "RV710", 0x954F : "RV710", 0x9552 : "M92", 0x9553 : "M92", 0x9555 : "M93", 0x9557 : "M93", 0x9581 : "M76M", 0x9583 : "M76", 0x9586 : "RV630", 0x9587 : "RV630 PRO", 0x9588 : "RV630 XT", 0x9589 : "&#1055;&#1056;&#1054; RV630", 0x958B : "M76", 0x958C : "RV630GL", 0x958D : "RV630GL", 0x958E : "RV630", 0x958F : "M76", 0x9590 : "RV630", 0x9591 : "M86-M", 0x9593 : "M86", 0x9595 : "M86", 0x9596 : "RV630", 0x9597 : "RV630", 0x9598 : "RV630", 0x9599 : "RV630", 0x959B : "M86", 0x95C0 : "RV610", 0x95C2 : "M72", 0x95c4 : "M82-S", 0x95C5 : "RV620 LE", 0x95C6 : "RV620", 0x95C7 : "RV610", 0x95C9 : "RV620", 0x95CC : "RV620", 0x95CD : "RV610", 0x95CE : "RV610", 0x95CF : "RV610", 0x9610 : "RS780", 0x9611 : "RS780", 0x9612 : "RS780M", 0x9613 : "RS780M", 0x9614 : "RS780", 0x9615 : "RS780", 0x9616 : "RS780", 0x9644 : "A4-3420", 0x9647 : "AMD A6-3410MX APU With AMD Radeon HD 6520G", 0x9648 : " 9648", 0x9649 : "HD 6480G", 0x970F : "970F", 0x9710 : "RS880", 0x9711 : "RS880", 0x9712 : "4250", 0x9713 : "RS880MC", 0x9715 : "RS880", 0x9802 : "AMD E-350", 0x9803 : "2411E6FE", 0x9804 : "AMD Radeon HD 6310 Graphics AMD Radeon HD 6310 Gr", 0x9806 : "AMD Radeon HD 6320", 0x9807 : "unknow", 
0x9808 : "E2-1800", 0x9809 : "7310M", 0x9834 : "AMD Radeon HD 8210", 0x9851 : "3801", 0x9876 : "ATI GTC (GT-C2U2)", 0x9902 : "9902", 0x9998 : "9998", 0x9999 : "(0x9498", 0x999C : "999C", 0xAA01 : "Ati Function driver for high definition audio1", 0xAA08 : "All with HDMI support", 0xAA10 : "677", 0xAA20 : "RV630", 0xAA28 : "3400", 0xAA58 : "AA58", 0xaa68 : " 0x040300", 0xAC12 : "Theater HD T507", 0xCAB0 : "A3/U1", 0xCAB1 : "A3/U1", 0xcab2 : "RS200", 0xCBB2 : "RS200", 0x0876 : "", 7800 : "", }, 0x1003 : { 0x0201 : "GUI Accelerator", }, 0x1004 : { 0x0005 : "DEV_0200", 0x0006 : "ISA Bridge", 0x0007 : "Wildcat System Controller", 0x0008 : "Wildcat ISA Bridge", 0x0009 : "", 0x000C : "", 0x000D : "", 0x0100 : "CPU to PCI Bridge for notebook", 0x0101 : "Peripheral Controller", 0x0102 : "PCI to PCI Bridge", 0x0103 : "PCI to ISA Bridge", 0x0104 : "Host Bridge", 0x0105 : "IrDA Controller", 0x0200 : "RISC GUI Accelerator", 0x0280 : "RISC GUI Accelerator", 0x0304 : "ThunderBird PCI Audio Accelerator", 0x0305 : "ThunderBird joystick port", 0x0306 : "ThunderBird 16650 UART", 0x0307 : "Philips Seismic Edge 705", 0x0308 : "Philips PSC705 GamePort Enumerator", 0x0702 : "Golden Gate II", }, 0x1006 : { 0x3044 : "OHCI Compliant IEEE 1394 Host Controller", }, 0x1008 : { 0x9876 : "23", }, 0x100A : { 0x8235 : "U87088R06", }, 0x100B : { 0x0001 : "10/100 Ethernet MAC", 0x0002 : "PCI-IDE DMA Master Mode Interface Ctrlr", 0x000E : "Legacy I/O Controller", 0x000F : "IEEE 1394 OHCI Controller", 0x0011 : "PCI System I/O", 0x0012 : "USB Controller", 0x001B : "Advanced PCI Audio Accelerator", 0x0020 : "MacPhyter 10/100 Mb/s Ethernet MAC & PHY", 0x0020 : "10/100 MacPhyter3v PCI Adapter", 0x0021 : "PCI to ISA Bridge", 0x0022 : "10/100/1000 Mb/s PCI Ethernet NIC", 0x0028 : "PCI Host Bridge", 0x002A : "GeodeLink PCI South Bridge", 0x002D : "Geode IDE Controller", 0x002E : "GEODE - GX3 Audio CS5535", 0x002F : "USB Controller", 0x0030 : "Geode VGA Compatible Device", 0x0500 : "LPC Bridge and GPIO", 0x0501 : "SMI Status and ACPI", 0x0502 : "IDE Controller", 0x0503 : "XpressAUDIO", 0x0504 : "Video Processor", 0x0505 : "X-Bus Expansion Interface", 0x0510 : "LPC Bridge and GPIO", 0x0511 : "SMI Status and ACPI", 0x0515 : "X-Bus Expansion Interface", 0x23 : "", 0xD001 : "PCI-IDE Interface", }, 0x100C : { 0x3202 : "GUI Accelerator", 0x3205 : "GUI Accelerator", 0x3206 : "GUI Accelerator", 0x3207 : "GUI Accelerator", 0x3208 : "Graphics/Multimedia Engine", 0x4702 : "", }, 0x100E : { 0x0564 : "Host Bridge", 0x55CC : "South Bridge", 0x9000 : "WeitekPower GUI Accelerator", 0x9001 : "GUI Accelerator", 0x9100 : "GUI Accelerator", }, 0x1011 : { 0x0001 : "PCI-PCI Bridge", 0x0002 : "Tulip Ethernet Adapter", 0x0004 : "PCI Graphics Accelerator", 0x0007 : "NV-RAM", 0x0008 : "SCSI to SCSI Adapter", 0x0009 : "Fast Ethernet Ctrlr", 0x000A : "Video Codec", 0x000C : "6IfPpL <a href=", 0x000D : "TGA2 PDXGB", 0x000F : "FDDI", 0x0014 : "Tulip Plus Ethernet Adapter", 0x0016 : "ATM", 0x0019 : "Olicom RapidFire 2327 Fast Ethernet Adapter", 0x0021 : "PCI-PCI Bridge", 0x0022 : "PCI-PCI Bridge", 0x0023 : "PCI to PCI Bridge", 0x0024 : "PCI-PCI Bridge", 0x0025 : "PCI-PCI Bridge", 0x0026 : "PCI-PCI Bridge", 0x0034 : "CardBus", 0x0045 : "PCI to PCI Bridge", 0x0046 : "PCI-to-PCI Bridge", 0x1011 : "PCI-PCI Bridge", 0x1065 : "Mylex DAC1164P Disk Array Controller", 0x2000 : "Fault Mgr (3.3v/5v Universal PCI)", }, 0x1013 : { 0x0038 : "pci", 0x0040 : "Flat Panel GUI Accelerator", 0x004C : "64-bit Accelerated LCD/CRT Controller", 0x00A0 : "GUI Accelerator", 0x00A2 : "Alpine 
GUI Accelerator", 0x00A4 : "Alpine GUI Accelerator", 0x00A8 : "Alpine GUI Accelerator", 0x00AC : "Video card (i guess?)", 0x00B8 : "64-bit VisualMedia Accelerator", 0x00BC : "64-bit SGRAM GUI accelerator", 0x00D0 : "Laguna VisualMedia graphics accelerator", 0x00D4 : "Laguna 3D VisualMedia Graphics Accel", 0x00D5 : "Laguna BD", 0x00D6 : "Laguna 3D VisualMedia Graphics Accel", 0x00E8 : "", 0x1013 : "accelerator do audio do pci de sound fusion", 0x1100 : "PCI-to-PC Card host adapter", 0x1110 : "PCMCIA/CardBus Controller", 0x1112 : "PCMCIA/CardBus Controller", 0x1113 : "PCI-to-CardBus Host Adapter", 0x1200 : "Nordic GUI Accelerator", 0x1202 : "Viking GUI Accelerator", 0x1204 : "Nordic-lite VGA Cntrlr", 0x4000 : "Ambient CLM Data Fax Voice", 0x4400 : "Communications Controller", 0x6001 : "CrystalClear SoundFusion PCI Audio Accelerator", 0x6003 : "Crystal Sound Fusion a", 0x6004 : "CrystalClear SoundFusion PCI Audio Accel", 0x6005 : "Crystal Soundfusion(tm) CS 40210", 0x9876 : "SoundFusion PCI Audio Accelerator", }, 0x1014 : { 0x0002 : "MCA Bridge", 0x0005 : "CPU Bridge", 0x0007 : "CPU Bridge", 0x000A : "ISA Bridge w/PnP", 0x0017 : "CPU to PCI Bridge", 0x0018 : "TR Auto LANStreamer", 0x001B : "Graphics Adapter", 0x001D : "scsi-2 fast pci adapter", 0x0020 : "MCA Bridge", 0x0022 : "PCI to PCI Bridge ", 0x002D : "", 0x002E : "Coppertime RAID SCSI Adapter", 0x0036 : "32-bit LocalBus Bridge", 0x0037 : "PowerPC to PCI Bridge and Memory Ctrlr", 0x003A : "CPU to PCI Bridge", 0x003E : "IBM Token Ring PCI", 0x0045 : "SSA Adapter", 0x0046 : "Interrupt Controller", 0x0047 : "PCI to PCI Bridge", 0x0048 : "PCI to PCI Bridge", 0x0049 : "Warhead SCSI Controller", 0x004D : "MPEG-2 Decoder", 0x004E : "ATM Controller", 0x004F : "ATM Controller", 0x0050 : "ATM Controller", 0x0053 : "25 MBit ATM controller", 0x0057 : "MPEG PCI Bridge", 0x005C : "10/100 PCI Ethernet Adapter", 0x005D : "TCP/IP networking device", 0x007C : "ATM Controller", 0x007D : "MPEG-2 Decoder", 0x0090 : "", 0x0095 : "PCI Docking Bridge", 0x0096 : "Chukar chipset SCSI Controller", 0x00A1 : "ATM support device", 0x00A5 : "ATM Controller", 0x00A6 : "ATM 155Mbps MM Controller", 0x00B7 : "256-bit Graphics Rasterizer", 0x00BE : "ATM 622Mbps Controller", 0x00CE : "Adapter 2 Token Ring Card", 0x00F9 : "Memory Controller and PCI Bridge", 0x00FC : "PCI-64 Bridge", 0x0105 : "PCI-32 Bridge", 0x010F : "Remote Supervisor+Serial Port+Mouse/Keyb", 0x011B : "Raid controller", 0x0142 : "Video Compositor Input", 0x0144 : "Video Compositor Output", 0x0153 : "", 0x0156 : "PLB to PCI Bridge", 0x0170 : "Rasterizer/IBM GT1000 Geometr", 0x0188 : "PCI Bridge", 0x01a2 : "Modem: Intel Corporation 82440MX AC'97 Modem Controller (prog-if 00 [Generic])", 0x01A7 : "PCI-X Bridge R1.1", 0x01BD : "Morpheus SCSI RAID Controller", 0x01ef : "PLB to PCI-X Bridge", 0x01ff : "10/100 Mbps Ethernet PCI Adapter II", 0x0246 : "", 0x027F : "Embedded PowerPC CPU", 0x0289 : "0890", 0x028c : "SCSI Storage Controller", 0x0295 : "IBM SurePOS Riser Card Function 0", 0x0297 : "IBM SurePOS Riser Card Function 1 (UARTs)", 0x02A1 : "Calgary PCI-X Host Bridge", 0x0302 : "PCI-X Host Bridge", 0x0308 : "IBM CalIOC2 (Calgary on PCI-E)", 0x0339 : "n/a", 0x10e5 : "IBM 4764-001 PCI-X Cryptographic Coprocessor (1410e501)", 0xFFFF : "Interrupt Controller", }, 0x1017 : { 0x5343 : "SPEA 3D Accelerator", }, 0x1018 : { 0x3330 : "5444469821", }, 0x1019 : { 0x1B10 : "VIA chipset", 0x9876 : "Intel(R) Celeron(R) CPU 2.80GHz", }, 0x101A : { 0x0005 : "100VG/AnyLAN Adapter", 0x0009 : "PCI-X dual port ", }, 0x101E : { 
0x9010 : "Ultra Wide SCSI RAID Controller2", 0x9030 : "EIDE Controller", 0x9031 : "EIDE Controller", 0x9032 : "IDE and SCSI Cntrlr", 0x9033 : "SCSI Controller", 0x9040 : "Multimedia card", 0x9060 : "Ultra GT RAID Controller", 0x9063 : "Remote Assistant", 0x9095 : "SGPIO/SES/IPMI Initiator", }, 0x1022 : { 0x1100 : "HyperTransport Technology Configuration", 0x1101 : "Address Map", 0x1102 : "AMD Hammer - DRAM Controller ", 0x1103 : "AMD Hammer - Miscellaneous Control ", 0x1200 : "PCI standard host CPU bridge", 0x1201 : "PCI standard host CPU bridge", 0x1203 : "PCI standard host CPU bridge", 0x1204 : "PCI standard host CPU bridge", 0x1419 : "AMD Input/Outpu Memory Management Unit", 0x1510 : "PCI standard host CPU bridge", 0x1513 : "PCI standard PCI-to-PCI bridge", 0x1514 : "PCI standard PCI-to-PCI bridge", 0x1515 : "PCI standard PCI-to-PCI bridge", 0x1700 : "PCI standard host CPU bridge", 0x1701 : "PCI standard host CPU bridge", 0x1702 : "PCI standard host CPU bridge", 0x1703 : "PCI standard host CPU bridge", 0x1704 : "PCI standard host CPU bridge", 0x1716 : "PCI standard host CPU bridge", 0x1718 : "PCI standard host CPU bridge", 0x1719 : "PCI standard host CPU bridge", 0x2000 : "PCnet LANCE PCI Ethernet Controller", 0x2001 : "PCnet-Home Networking Ctrlr (1/10 Mbps)", 0x2003 : "Wireless LAN chipset SMC 2602W V3 http://www.smc.com/index.cfm?event=downloads.doSearchCriteria&loca", 0x2020 : "SCSI Controller", 0x2040 : "Ethernet Controller", 0x2081 : "GeodeLX graphics adapter", 0x2082 : "Geode GX3 AES Crypto Driver", 0x208F : "GeodeLink PCI South Bridge", 0x2093 : "CS5536 Audio Controller", 0x2094 : "CS5536 OHCI USB Host Controller", 0x2095 : "CS5536 EHCI USB Host Controller", 0x2096 : "CS5536 USB Device Controller", 0x2097 : "CS5536 USB OTG Controller", 0x209A : "CS5536 IDE Controller", 0x2433 : "Chill Control Connector", 0x3000 : "ELAN Microcontroller PCI Host Bridge", 0x5e4b : "Radeon X700 Pro", 0x7004 : "CPU to PCI Bridge", 0x7006 : "Processor-to-PCI Bridge / Memory Ctrlr", 0x7007 : "AGP and PCI-to-PCI Bridge (1x/2x AGP)", 0x700A : "AGP Host to PCI Bridge", 0x700B : "AGP PCI to PCI Bridge", 0x700C : "CPU to PCI Bridge (SMP chipset)", 0x700D : "CPU to PCI Bridge (AGP 4x)", 0x700E : "North Bridge", 0x700F : "CPU to AGP Bridge (AGP 4x)", 0x7400 : "PCI to ISA Bridge", 0x7401 : "Bus Master IDE Controller", 0x7403 : "Power Management Controller", 0x7404 : "PCI to USB Open Host Controller", 0x7408 : "PCI-ISA Bridge", 0x7409 : "EIDE Controller", 0x740B : "Power Management", 0x740C : "USB Open Host Controller", 0x7410 : "PCI to ISA/LPC Bridge", 0x7411 : "Enhanced IDE Controller", 0x7412 : "USB Controller", 0x7413 : "Power Management Controller", 0x7414 : "USB OpenHCI Host Controller", 0x7440 : "LPC Bridge", 0x7441 : "EIDE Controller", 0x7443 : "System Management", 0x7445 : "AC97 Audio", 0x7446 : "AC97 Modem", 0x7448 : "PCI Bridge", 0x7449 : "USB Controller", 0x7450 : "PCI-X Bridge", 0x7451 : "PCI-X IOAPIC", 0x7454 : "System Controller", 0x7455 : "AGP Bridge", 0x7458 : "PCI-X Bridge", 0x7459 : "PCI-X IOAPIC", 0x7460 : "PCI Bridge", 0x7461 : "USB 2.0 Controller", 0x7462 : "Ethernet Controller", 0x7463 : "USB Enhanced Host Controller", 0x7464 : "USB OpenHCI Host Controller", 0x7468 : "LPC Bridge", 0x7469 : "UltraATA/133 Controller", 0x746A : "SMBus 2.0 Controller", 0x746B : "System Management", 0x746D : " Audio Controller", 0x746E : "AC'97 Modem", 0x756B : "ACPI Controller", 0x7801 : "AMD SATA Controller", 0x7801 : "AMD SATA Controller", 0x7804 : "AMD SATA Controller", 0x7807 : "Standard OpenHCD USB Host 
Controller - AMD USB Filter Driver", 0x7808 : "Standard PCI-to-USB extended host controller ", 0x7809 : "Standard OpenHCD USB Host Controller", 0x780b : "SM Bus controller", 0x780D : "High Definition Audio Controller", 0x780E : "PCI standard ISA bridge", 0x780F : "PCI standard PCI-to-PCI bridge", 0x7812 : "AMD USB 3.0 Host Controller", 0x7814 : "FCH USB XHCI Controller", 0x9601 : "PCI standard host CPU bridge", 0x9602 : "PCI standard PCI-to-PCI bridge", 0x9604 : "PCI standard PCI-to-PCI bridge", 0x9605 : "PCI standard PCI-to-PCI bridge", 0x9606 : "PCI standard PCI-to-PCI bridge", 0x9642 : "AMD Radeon HD6370D", }, 0x1023 : { 0x0194 : "CardBus Controller", 0x2000 : "advanced PCI DirectSound accelerator", 0x2001 : "PCI Audio", 0x2100 : "Video Accelerator", 0x2200 : "Video adapter", 0x8400 : "CyberBlade i7", 0x8420 : "Trident Cyber Blade i7 AGP (55)", 0x8500 : "Via Tech VT8361/VT8601 Graphics Controller", 0x8520 : "CyberBlade i1", 0x8620 : "CyberBlade Ai1", 0x8820 : "Trident Display Controller / CyberALADDiN-T", 0x9320 : "32-bit GUI Accelerator", 0x9350 : "32-bit GUI Accelerator", 0x9360 : "Flat panel Cntrlr", 0x9382 : "", 0x9383 : "", 0x9385 : "", 0x9386 : "Video Accelerator", 0x9388 : "Video Accelerator", 0x9397 : "Video Accelerator 3D", 0x939A : "Video Accelerator", 0x9420 : "DGi GUI Accelerator", 0x9430 : "GUI Accelerator", 0x9440 : "DGi GUI Accelerator", 0x9460 : "32-bit GUI Accelerator", 0x9470 : "", 0x9520 : "Video Accelerator", 0x9525 : "Video Accelerator", 0x9540 : "Video Accelerator", 0x9660 : "GUI Accelerator", 0x9680 : "GUI Accelerator", 0x9682 : "Trident A CAB01", 0x9683 : "GUI Accelerator", 0x9685 : "2MB VGA", 0x9750 : "trident dgi", 0x9753 : "Video Accelerator", 0x9754 : "Wave Video Accelerator", 0x9759 : "Image GUI Accelerator", 0x9783 : "", 0x9785 : "", 0x9850 : "3DImage 9850", 0x9880 : "Blade 3D PCI/AGP", 0x9910 : "CyberBlade XP", 0x9930 : "", 0x9960 : "Trident Video Accelerator CyberBlade-1A31", }, 0x1025 : { 0x0028 : "Agere Systems soft modem chip", 0x1435 : "VL Bridge", 0x1445 : "VL Bridge & EIDE", 0x1449 : "ISA Bridge", 0x1451 : "Pentium Chipset", 0x1461 : "P54C Chipset", 0x1489 : "", 0x1511 : "", 0x1512 : "", 0x1513 : "", 0x1521 : "CPU Bridge", 0x1523 : "ISA Bridge", 0x1531 : "North Bridge", 0x1533 : "ISA South Bridge", 0x1535 : "PCI South Bridge", 0x1541 : "AGP PCI North Bridge Aladdin V/V+", 0x1542 : "AGP+PCI North Bridge", 0x1543 : "PCI South Bridge Aladdin IV+/V", 0x1561 : "North Bridge", 0x1621 : "PCI North Bridge Aladdin Pro II", 0x1631 : "PCI North Bridge Aladdin Pro III", 0x1641 : "PCI North Bridge Aladdin Pro IV", 0x3141 : "GUI Accelerator", 0x3143 : "GUI Accelerator", 0x3145 : "GUI Accelerator", 0x3147 : "GUI Accelerator", 0x3149 : "GUI Accelerator", 0x3151 : "GUI Accelerator", 0x3307 : "MPEG-1 Decoder", 0x3309 : "MPEG Decoder", 0x5212 : "", 0x5215 : "EIDE Controller", 0x5217 : "I/O Controller", 0x5219 : "I/O Controller", 0x5225 : "EIDE Controller", 0x5229 : "EIDE Controller", 0x5235 : "I/O Controller", 0x5237 : "OpenHCI USB Host Controller", 0x5239 : "", 0x5240 : "EIDE Controller", 0x5241 : "PCMCIA Bridge", 0x5242 : "General Purpose Controller", 0x5243 : "PCI to PCI Bridge", 0x5244 : "Floppy Disk Controller", 0x5247 : "PCI-PCI Bridge", 0x5427 : "PCI to AGP Bridge", 0x5451 : "PCI AC-Link Controller Audio Device", 0x5453 : "M5453 AC-Link Controller Modem Device", 0x7101 : "PCI PMU Power Management Controller", }, 0x1028 : { 
0x0001 : "Expandable RAID Controller (PERC) (SCSI)", 0x0002 : "Expandable RAID Controller", 0x0003 : "Expandable RAID Controller", 0x0004 : "Expandable RAID Controller", 0x0005 : "Expandable RAID Controller", 0x0006 : "Expandable RAID Controller", 0x0007 : "Remote Assistant Card", 0x0008 : "RAC Virtual UART Port", 0x000A : "Expandable RAID Controller", 0x000C : "Embedded Systems Management Device 4", 0x000D : "LSI53C895 PCI to Ultra2 SCSI I/O Processor with LVD Link", 0x000E : "PERC 4/DI Raid Controller", 0x0010 : "HJ866 - ESM4 &#1059;&#1076;&#1072;&#1083;&#1077;&#1085;&#1085;&#1099;&#1081; &#1044;&#1086;&#1089;&#", 0x0011 : "Dell Remote Access Controller v4", 0x0012 : "Dell RAC v4 Virtual UART", 0x0013 : "Expandable RAID Controller", 0x0014 : "Dell Remote Access Controller subsystem", 0x0015 : "Integrated RAID controller", 0x012c : "910gml", 0x016d : "Dell PRO/1000 MT Network Connection", 0x0287 : "Adaptec 2200S SCSI RAID controller", 0x1000 : "A Intel 537 epg v.92 modem repackaged by dell", 0x1050 : "ethernet controller", 0x1f0c : "PCI Simple Communication Controller", 0x1f0c : "pci simple communication controller", 0x2107 : "HID Keyboard Device", 0x3002 : "Dell Wireless 1702 Bluetooth v3.0+HS", 0x3582 : "video controller", 0x675d : "amd radeon hd7570", 0x6821 : "3", 0x8184 : "Dell Wireless 5540 HSPA Mini Card", 0x9876 : "Expandable RAID Controller", }, 0x102A : { 0x0000 : "4 port usb hub", 0x0003 : "USBVID_0000&PID_00006&76F8B18&0&2", 0x0010 : "i486 Chipset", 0x002A : "4 port usb hub", 0x102A : "P5 Chipset", 0x9876 : "P5 CHIPSET", }, 0x102B : { 0x0010 : "Impression?", 0x0040 : "Matrox P650 very new model (20080724)", 0x051 : "matrox", 0x0518 : "Atlas GUI Accelerator", 0x0519 : "Strorm GUI Accelerator", 0x051A : "Hurricane/Cyclone 64-bit graphics chip", 0x051B : "Matrox", 0x051E : "Chinook", 0x051F : "Mistral", 0x0520 : "AGP", 0x0521 : "102B", 0x0522 : "Matrox G200e (ServerEngines) - English", 0x0525 : "Intel Pentium III", 0x0527 : "", 0x0528 : "Parhelia 128MB/256MB/PCI/HR256", 0x0530 : "Matrox G200eV", 0x0534 : "G200eR", 0x0540 : "M9138 LP PCIe x16", 0x0D10 : "Athena GUI accelerator", 0x1000 : "Twister", 0x1001 : "Twister AGP", 0x1525 : "", 0x1527 : "", 0x2007 : "GUI+3D Accelerator", 0x2527 : "AGP Chipset", 0x2537 : "Parhelia Chipset AGP", 0x2538 : "Matrox Millennium P650 LP PCIe 64", 0x2539 : "Matrox Graphics Board dual DVI", 0x4536 : "Video Capture Card", 0x522 : "Matrox G200e (ServerEngines)", 0x525 : "G45+", 0x532 : "Matrox G200eW 8 MB DDR2 ", 0x6573 : "10/100 Multiport Switch NIC", 0x80A0 : "Multimedia Device", 0x9876 : "Multimedia device", }, 0x102C : { 0x00B8 : "Wingine DGX - DRAM Graphics Accelerator", 0x00C0 : "AGP/PCI Flat Panel/CRT VGA Accelerator", 0x00D0 : "Flat panel/crt VGA Cntrlr", 0x00D8 : "Flat Panel/CRT VGA Controller", 0x00DC : "GUI Accelerator", 0x00E0 : "LCD/CRT controller", 0x00E4 : "Flat Panel/LCD CRT GUI Accelerator", 0x00E5 : "VGA GUI Accelerator", 0x00F0 : "vga Controller", 0x00F4 : "graphic driver", 0x00F5 : "GUI Controller", 0x01E0 : "PCI Flat Panel/CRT VGA Accelerator", 0x0C30 : "AGP/PCI Flat Panel/CRT VGA Accelerator", }, 0x102D : { 0x50DC : "Audio", }, 0x102F : { 0x0009 : "CPU Bridge", 0x000A : "CPU Bridge?", 0x0020 : "ATM PCI Adapter", 0x0030 : "PCIVEN_8086", 0x0031 : "Integrated 10/100 Mbit Ethernet Controller", 0x0100 : "Realtek RTS5208 Card Reader", 0x0105 : "GOKU-S Bus Master IDE Controller", 0x0106 : "GOKU-S USB Host Controller", 0x0107 : "GOKU-S USB Device Controller", 0x0108 : "GOKU-S I2C Bus/SIO/GPIO Controller", 0x0180 : "MIPS Processor", 0x0181 
: "MIPS RISC PCI Controller (PCIC)", 0x0182 : "MIPS RISC PCI Controller (PCIC)", 0x01BA : "SpursEngine", 0x0805 : "PCIVEN_1179&DEV_0805&CC_0880", 0x102F : "PCIVEN_1179&DEV_0805&SUBSYS_00011179&REV_054&7562792&0&68F0", }, 0x1031 : { 0x5601 : "I/O & JPEG", 0x5607 : "video in and out with motion jpeg compression and deco", 0x5631 : "", 0x6057 : "DC30D-601601-4.0", }, 0x1033 : { 0x0001 : "PCI to 486 like bus Bridge", 0x0002 : "PCI to VL98 Bridge", 0x0003 : "ATM Controller", 0x0004 : "PCI bus Bridge", 0x0005 : "PCI to 486 like peripheral bus Bridge", 0x0006 : "GUI Accelerator", 0x0007 : "PCI to ux-bus Bridge", 0x0008 : "GUI Accelerator (vga equivalent)", 0x0009 : "graphic Cntrlr for 98", 0x001A : "", 0x001D : "NEASCOT-S20 ATM Integrated SAR Ctrlr", 0x0021 : "Nile I", 0x0029 : "3D Accelerator", 0x002A : "3D Accelerator", 0x002f : "1394 Host Controller", 0x0034 : "PCIVEN_1033&DEV_0034&SUBSYS_00341033", 0x0035 : "Dual OHCI controllers plus Single EHCI controller", 0x0036 : "NEASCOT-S40C ATM Light SAR Controller", 0x003E : "NAPCCARD CardBus Controller", 0x0046 : "3D Accelerator", 0x005A : "Nile 4", 0x0063 : "Firewarden IEEE1394 OHCI Host Controller", 0x0067 : "PowerVR series II graphics processor", 0x0074 : "56k Voice Modem", 0x009B : "", 0x00A6 : "", 0x00BE : "64-bit CPU with Northbridge", 0x00CD : "IEEE1394 1-Chip OHCI Host Controller", 0x00CE : "IEEE1394 1-Chip OHCI Host Controller", 0x00E0 : "USB 2.0 Host Controller", 0x00E0 : "USB 2.0 Host Controller", 0x00E7 : "IEEE1394 OHCI 1.1 3-port PHY-Link Ctrlr", 0x00F2 : "IEEE1394+OHCI+1.1+3-port+PHY-Link+Ctrlr", 0x0165 : "AVerMedia A313 MiniCard Hybrid DVB-T", 0x0194 : "Renesas Electronics USB 3.0 Host Controller", 0x0520 : "1394 CARD", 0x1033 : "NEC PCI to USB Open Host Controller", 0x9876 : "USB 2.0 Host Controller", }, 0x1036 : { 0x0000 : "Fast SCSI", }, 0x1039 : { 0x7012 : "PCI Audio Accelerator", 0x0001 : "Anthlon 64 cpu to PCI bridge", 0x0002 : "Virtual PCI to PCI Bridge (AGP)", 0x0003 : "SiS AGP Controller / SiS Accelerated Graphics Port ", 0x0005 : "Pentium chipset", 0x0006 : "PCI/ISA Cache Memory Controller (PCMC)", 0x0008 : "PCI System I/O (PSIO)", 0x0009 : "SIS PMU device", 0x0016 : "SMBus ControllerP4kjc", 0x0018 : "vga", 0x0160 : "SiS160 811 Wireless LAN Adapter", 0x0180 : "SiS 180/181 RAID Controller ", 0x0181 : "Raid Controller(?Mode Raid1)", 0x0182 : "Raid Controller(?Mode Raid0+1)", 0x0183 : "?SATA", 0x0186 : "0330", 0x0190 : " SiS965", 0x0191 : "SIS191", 0x0200 : "Onboard Graphics Controller", 0x0204 : "PCI1", 0x0205 : "PCI Graphics & Video Accelerator", 0x0300 : "GUI Accelerator+3D", 0x0305 : "2D/3D/Video/DVD Accelerator", 0x0315 : "2D/3D Accelerator", 0x0325 : "Silicon Integrated Systems (SiS)", 0x0330 : "Xabre 2D/3D Accelerator (AG400T8-D64)", 0x0406 : "PCI/ISA Cache Memory Controller (PCMC)", 0x0496 : "CPU to PCI & PCI to ISA Bridge", 0x0530 : "Host-to-PCI bridge", 0x0540 : "Host-to-PCI Bridge", 0x0550 : "North Bridge", 0x0596 : "Pentium PCI chipset with IDE", 0x0597 : "EIDE Controller (step C)", 0x0601 : "PCI EIDE Controller", 0x0620 : "Host-to-PCI Bridge", 0x0630 : "Host-to-PCI Bridge", 0x0635 : "Host-to-PCI Bridge", 0x0640 : "Host-to-PCI Bridge", 0x0645 : "Host-to-PCI Bridge", 0x0646 : "Host-to-PCI Bridge", 0x0648 : "Host-to-PCI Bridge", 0x0649 : "Host-to-PCI Bridge", 0x0650 : "Host-to-PCI Bridge", 0x0651 : "Host-to-PCI Bridge", 0x0655 : "Host-to-PCI Bridge", 0x0656 : "CPU to PCI Bridge", 0x0658 : "CPU to PCI Bridge", 0x0659 : "CPU to PCI Bridge", 0x0660 : "Host-to-PCI Bridge", 0x0661 : "SiS 661FX/GX Chipset - Host-PCI 
Bridge", 0x0662 : "CPU to PCI Bridge", 0x0663 : "CPU to PCI Bridge", 0x0730 : "Host-to-PCI Bridge", 0x0735 : "Host-to-PCI Bridge", 0x0740 : "LPC Bridge", 0x0741 : "CPU to PCI Bridge", 0x0745 : "Host-to-PCI Bridge", 0x0746 : "Host-to-PCI Bridge", 0x0748 : "CPU to PCI Bridge", 0x0755 : "Host-to-PCI Bridge", 0x0756 : "CPU to PCI Bridge", 0x0760 : "Athlon 64 CPU to PCI Bridge", 0x0761 : "Athlon 64 CPU to PCI Bridge", 0x0762 : "Athlon 64 CPU to PCI Bridge", 0x0900 : "SiS 900 Fast Ethernet Adapter", 0x0901 : "SiS900 10/100 Ethernet Adapter", 0x0962 : "LPC Bridge", 0x0963 : "PCI to ISA Bridge", 0x0964 : "SiS 964 MuTIOL Media I/O Bridge ", 0x0999 : "pciven_1039&subsys_200b163&rev_a03&61aaa010&17", 0x1039 : "SiS5597 SVGAa", 0x1040 : "", 0x10ec : "bus controler", 0x1182 : "Raid Controller(?Mode Raid5)", 0x1183 : "SATA IDE Controller", 0x1184 : "Raid/AHCI Controller", 0x1185 : "AHCI Controller", 0x1234 : "SiS5597 SVGAa", 0x191 : "PCI /ven_1039", 0x3602 : "IDE Controller", 0x4321 : "Video Controller (VGA Compatible)", 0x5107 : "Hot Docking Controller", 0x5300 : "AGP", 0x5315 : "GUI Accelerator", 0x5401 : "486 PCI Chipset", 0x5511 : "PCI/ISA System Memory Controller", 0x5513 : "SiS 5513 IDE UDMA Controller / SiS 85C513 IDE Controller", 0x5517 : "CPU to PCI Bridge", 0x5518 : "UDMA IDE Controller", 0x5571 : "Memory/PCI bridge", 0x5581 : "p5 chipset", 0x5582 : "PCI to ISA Bridge", 0x5591 : "PCIVEN_1039", 0x5596 : " VGA Controller", 0x5597 : "Host to PCI bridge", 0x5600 : "Host-to-PCI Bridge", 0x5630 : "Host-to-PCI Bridge", 0x5811 : "", 0x6204 : "video decoder/mpeg interface", 0x6205 : "PCI VGA Controller", 0x6225 : "PCI Graphics & Video Accelerator", 0x6236 : "Graphics", 0x6300 : "GUI Accelerator+3D", 0x6306 : "Integrated 3D SVGA Controller", 0x6325 : "sis-651dx", 0x6326 : "sis 6326 AGP", 0x6330 : "GUI 2D/3D Accelerator", 0x6331 : "USB Host Controller", 0x6351 : "SiS IGP Graphics family SIS66x/SIS76x & SIS67x", 0x6355 : "962lua", 0x6787 : "Smart Link 56K Voice Modem (download from driverguide.com)", 0x6972 : "", 0x7001 : "SiS 7001 PCI to USB Open Host Controller ", 0x7002 : "USB 2.0 Enhanced Host Controller", 0x7005 : "Memory Stick Controller", 0x7007 : "OHCI Compliant FireWire Controller", 0x7012 : "SiS 7012 Audio Device / Realtek AC'97 Audio", 0x7013 : "Smart Link 56K Voice Modem (download from driverguide.com)", 0x7013 : "Smart Link 56K Voice Modem", 0x7015 : "Software Audio dd", 0x7016 : "10/100 Ethernet Adapter", 0x7018 : "Onboard audio", 0x7019 : "Hardware Audio", 0x7300 : "GUI Accelerator+3D", 0x7502 : "Realtek HDA Audio Driver.", 0x8139 : "2012", 0x9632 : "sis 650 integrated gfx controller (IGP)", 0x964 : "LPC BRIDGE", 0x9876 : "pci vga card for win95 & nt4 only", 0x9989 : "Smart Link 56K Voice Modem (download from driverguide.com)", 0x7012 : "PCI Audio Accelerator", 5811 : "", }, 0x103B : { 0x103b : "LAN Controller with 82562EM/EX PHY", }, 0x103C : { 0x0024 : "Standard Vista USB Keyboard", 0x0180 : "HID Keyboard Device", 0x0A01 : "HP Scanjet 2400", 0x1005 : "Visialize EG", 0x1008 : "001", 0x100A : "Hewlett-Packard VisualizeFX Series Video", 0x1028 : "HP ProtectSmart Hard Drive Protection - HP 3D DriveGuard", 0x1029 : "Tachyon XL2 Fibre Channel Adapter", 0x102A : "Tachyon TS Fibre Channel Host Adapter", 0x1030 : "DeskDirect 10/100VG LAN Adapter", 0x1031 : "DeskDirect 10/100", 0x1040 : "DeskDirect 10BaseT NIC", 0x1041 : "DeskDirect 10/100VG NIC", 0x1042 : "DeskDirect 10BaseT/2 NIC", 0x1048 : "", 0x1049 : "", 0x104A : "intel", 0x104B : "", 0x104D : "EL-10 Ethernet Adapter", 0x1064 : "PCnet Ethernet 
Controller", 0x10C1 : "NetServer Smart IRQ Router", 0x10ED : "HP Communications Port", 0x1200 : "10/100 NIC", 0x1219 : "NetServer PCI Hot-Plug Controller", 0x121A : "NetServer SMIC Controller", 0x121B : "NetServer Legacy COM Port Decoder", 0x121C : "NetServer PCI COM Port Decoder", 0x1229 : "System Bus Adapter", 0x122A : "I/O Controller", 0x122B : "Local Bus Adapter", 0x12FA : "Broadcom Wireless miniPCI in a HP laptop", 0x1302 : "HP Management Shared Memory Device", 0x137a : "Atheros AR5007", 0x1411 : "HP PSC 750", 0x171d : "HP Integrated Module with Bluetooth Wireless", 0x1F1D : "3G Broadband device", 0x201D : "3G Broadband device", 0x231D : "HP Integrated Module with Bluetooth Wireless Technology", 0x241D : "HP compaq nx6125", 0x2910 : "PCI Bus Exerciser", 0x292 : "PCI Host Interface Adapter", 0x2920 : "Fast Host Interface", 0x2924 : "PCI Host Interface Adapter", 0x2925 : "32 bit PCI Bus Exerciser and Analyzer", 0x2926 : "64 bit PCI Bus Exerciser and Analyzer", 0x2927 : "64 Bit", 0x294 : "pci hostinterface", 0x2940 : "64 bit", 0x311d : "ATHEROS AR3011 bluetooth 3.0+HS adapter", 0x3206 : "Adaptec Embedded Serial ATA HostRAID", 0x3207 : "not sure", 0x3220 : "P600 SmartArray Raid Controller", 0x3230 : "Smart Array P400 Controller", 0x323A : "Smart Array P410i Controller", 0x3302 : "Integrated Lights Out 2.0 Interfaz IPMI", 0x3A1D : "HP hs2340 HSPA+ MobileBroadband", 0x5461 : "HP integrated Module with Bluetooth 2.0 Wireless support", 0x6007 : "HP Mobile Data Protection Sensor", 0x9876 : "ATHEROS AR3011 bluetooth 3.0+HS adapter", }, 0x1043 : { 0x0675 : "Crestline", 0x1969 : "Attansic L1 Gigabit Ethernet 10/100/1000Base-T Adapter", 0x5653 : "ATI Radeon 3000 Graphics (Microsoft Corporation - WDDM v1.1)", 0x8103 : "NV31 [GeForce FX 5600 Ultra]", 0x82c6 : "Gigabit Ethernet(NDIS 6.0)", }, 0x1044 : { 0x1012 : "RAID Engine", 0x800A : "802.11 bg WLAN", 0xA400 : "SmartCache III/RAID SCSI Controller", 0xA500 : "PCI Bridge", 0xA501 : "I2O SmartRAID V Controller", 0xA511 : "SmartRAID Controller", }, 0x1045 : { 0x0005 : "", 0xA0F8 : "PCI USB Controller", 0xC101 : "GUI Accelerator", 0xC178 : "pci usb card 2- port", 0xC556 : "Viper", 0xC557 : "CPU Bridge (Viper)", 0xC558 : "ISA Bridge w/PnP", 0xC567 : "Vendetta chipset: host bridge", 0xC568 : "Vendetta chipset: ISA bridge", 0xC569 : "Pentium to PCI Bridge", 0xC621 : "PCI IDE Controller (PIC)", 0xC700 : "82C700 FireStar PCI to ISA Bridge", 0xC701 : "FireStar mobile chipset: host bridge", 0xC814 : "FireBridge II Docking Station Controller", 0xC822 : "CPU to PCI & PCI to ISA PnP bridge", 0xC824 : "FireFox 32-Bit PC Card Controller", 0xC825 : "PCI-to-ISA Bridge", 0xC832 : "CPU-to-PCI and PCI-to-ISA Bridge", 0xC861 : "OPTi 82C861 PCI to USB Open Host Controller", 0xC881 : "FireLink 1394 OHCI Link Controller", 0xC895 : "", 0xC931 : "ISA Sound & Game Port controller.", 0xC935 : "MachOne integrated PCI audio processor", 0xD568 : "PCI bus master IDE controller", 0xD768 : "Ultra DMA IDE controller", }, 0x1046 : { 0x5600 : "00/4&1a671", }, 0x1048 : { 0x0253 : "ELSA GLADIAC 528", 0x0C60 : "NVidia Geforce 2 MX", 0x0C71 : "NVidia GeForce3 Ti 200", 0x1000 : "ISDN Controller", 0x3000 : "", 0x8901 : "ELSA GLoria XL", }, 0x104A : { 0x0008 : "diamond", 0x0009 : "", 0x0010 : "PowerVR KYRO series 3 graphics processor", 0x0123 : "SPEAr1300", 0x0209 : "North/South Bridges", 0x020A : "North Bridge", 0x0210 : "ISA Bridge", 0x021A : "ISA Bridge", 0x021B : "ISA Bridge", 0x0228 : "IDE Controller", 0x0230 : "USB Controller", 0x0500 : "ADSL", 0x0981 : "10/100 Ethernet Adapter", 0x1746 
: "mp280", 0x2774 : "PCI 10/100 Ethernet Controller", 0x3520 : "MPEG-II Video Decoder", 0x7108 : "Advanced HD AVC decoder with 3D graphics acceleration", 0xCC00 : "ConneXt I/O Hub multifunction device", 0xCC01 : "ConneXt I/O Hub multifunction device", 0xCC02 : "ConneXt I/O Hub multifunction device", 0xCC03 : "ConneXt I/O Hub multifunction device", 0xCC04 : "ConneXt I/O Hub multifunction device", 0xCC05 : "ConneXt I/O Hub multifunction device", 0xCC06 : "ConneXt I/O Hub multifunction device", 0xCC07 : "ConneXt I/O Hub multifunction device", 0xCC08 : "ConneXt I/O Hub multifunction device", 0xCC09 : "ConneXt I/O Hub multifunction device", 0xCC0A : "ConneXt I/O Hub multifunction device", 0xCC0B : "ConneXt I/O Hub multifunction device", 0xCC0C : "ConneXt I/O Hub multifunction device", 0xCC0D : "ConneXt I/O Hub multifunction device", 0xCC0E : "ConneXt I/O Hub multifunction device", 0xCC0F : "ConneXt I/O Hub multifunction device", 0xCC10 : "ConneXt I/O Hub multifunction device", 0xCC11 : "ConneXt I/O Hub multifunction device", 0xCC12 : "ConneXt I/O Hub multifunction device", 0xCC13 : "ConneXt I/O Hub multifunction device", 0xCC14 : "ConneXt I/O Hub multifunction device", 0xCC15 : "ConneXt I/O Hub multifunction device", 0xCC16 : "ConneXt I/O Hub multifunction device", 0xCC17 : "ConneXt I/O Hub multifunction device", 0xCD00 : "SPEAr1300", 0xCD80 : "Root Complex of SPEAr1300", }, 0x104B : { 0x1040 : "BT958 SCSI Host Adaptor", 0x8130 : "Flashpoint LT", }, 0x104C : { 0x803B : "Texas Instruments Card Reader", 0x014e : "4515", 0x0500 : "ThunderLAN 100 Mbit LAN Controller", 0x0508 : "PCI interface for TI380 compressors", 0x1000 : "", 0x104C : "PCI Simple Communications Controller", 0x3B04 : "otros dispositivos", 0x3D04 : "Permedia", 0x3D07 : "AGP Permedia 2", 0x8000 : "LYNX IEEE1394 FireWire Host Controller", 0x8009 : "OHCI-Lynx PCI IEEE 1394 Host Controller", 0x8010 : "OHCI-Lynx IEEE 1394 Host Controller", 0x8011 : "OHCI-Lynx IEEE 1394 Controller", 0x8017 : "OHCI-Lynx IEEE 1394 Controller", 0x8019 : "OHCI-Lynx PCI IEEE 1394 Host Controller", 0x8020 : "OHCI Compliant FireWire Controller", 0x8021 : "1394a-2000 OHCI PHY/Link Layer CONTROLLER", 0x8023 : "IEEE1394a-2000 OHCI PHY/Link-Layer Ctrlr", 0x8024 : "1394a-2000 OHCI PHY/Link Layer Ctrl", 0x8025 : "1394b OHCI-Lynx IEEE 1394 Host Controller", 0x8026 : "1394a-2000 OHCI PHY/Link Layer Ctrlr", 0x8027 : "OHCI-Lynx IEEE 1394 Controller", 0x8029 : "OHCI Compliant IEEE-1394 FireWire Controller ", 0x802e : "OHCI Compliant IEEE 1394 Host Controller", 0x8031 : "Generic CardBus Controller", 0x8033 : "Integrated FlashMedia / Card Reader", 0x8034 : "SDA Standard Compliant SD Host Controller", 0x8035 : "PCI GemCore based SmartCard controller", 0x8036 : "Texas Instruments PCIxxx12 Cardbus Controller", 0x8038 : "Texas Instruments PCI GemCore based SmartCard Controller", 0x8039 : "104C", 0x803a : "OHCI Compliant IEEE 1394 Host controller", 0x803B : "Texas Instruments Card Reader", 0x803c : "SDA Standard Compliant SD Host Controller", 0x803D : "Texas Instruments PCI GemCore based SmartCard controller", 0x8119 : "iRDA Compatible Controller", 0x8201 : "TI UltraMedia Firmware Loader Device", 0x8204 : " 4515", 0x8231 : "PCI-Express to PCI/PCI-X bridge", 0x8232 : "Controladora de vdeo multimedia", 0x8241 : "Texas Instruments USB 3.0 XHCI Host Controller", 0x8400 : "D-Link AirPlus DWL-520+", 0x8671 : "bogus", 0x9065 : "Fixed Point Digital Signal Processor", 0x9066 : "U.S. 
Robotics 802.11g Wireless Turbo PC Card ", 0xA001 : "64-bit PCI ATM SAR", 0xA100 : "32-bit PCI ATM SAR", 0xA102 : "HyperSAR Plus w/PCI host & UTOPIA i/f", 0XA106 : "Fixed Point Digital Signal Processor", 0xA186 : "TI C6416T DSP", 0xa828 : "PCI-to-PCI Bridge", 0xAC10 : "PC Card Controller", 0xAC11 : "PC Card Controller", 0xAC12 : "PC card CardBus Controller", 0xAC13 : "Texas Instruments PCIxx12 Integrated FlashMedia Controller", 0xAC15 : "CardBus Controller", 0xAC16 : "PC Card CardBus Controller", 0xAC17 : "CardBus Controller", 0xAC18 : "PC card CardBus Controller", 0xAC19 : "PC card CardBus Controller", 0xAC1A : "PC card CardBus Controller", 0xAC1B : "PC card CardBus Controller", 0xAC1C : "PC Card CardBus Controller", 0xac1e : "PCI To PCMCIA bridge", 0xAC1F : "PC card CardBus Controller", 0xAC20 : "PCI to PCI Bridge", 0xAC21 : "PCI to PCI Bridge", 0xAC22 : "PCI Docking Bridge", 0xAC23 : "PCI-to-PCI Bridge", 0xAC28 : "PCI-to-PCI Bridge", 0xAC30 : "PC card CardBus Controller", 0xAC40 : "PC card CardBus Controller", 0xAC41 : "PC card CardBus Controller", 0xAC42 : "PC card CardBus Controller", 0xAC43 : "PC card CardBus Controller", 0xAC44 : "PC Card Controller SDFSDAFSADFSDAFSDAF", 0xAC46 : "PCCard CardBus Controller", 0xac47 : "Cardbus", 0xAC50 : "PC card cardBus Controller", 0xAC51 : "Texas Instruments 1420", 0xAC52 : "PC card CardBus Controller", 0xAC53 : "PC card CardBus Controller - 5-in-1 Media Card Reader", 0xAC54 : "PCCard CardBus Controller w/UltraMedia", 0xAC55 : "PCCard CardBus Controller", 0xAC56 : "PCCard CardBus Controller", 0xAC57 : "PCCard CardBus Controller", 0xAC58 : "PCCard CardBus Controller", 0xAC59 : "PCCard CardBus Controller w/UltraMedia", 0xAC5A : "PCCard CardBus Controller w/UltraMedia", 0xac60 : "PCI2040 PCI to DSP Bridge", 0xac8e : "Generic CardBus Controller ", 0xAC8F : "FlashMedia", 0xB000 : "Device ID: 0xB001 ", 0xB001 : "DSP with a C64x+ core and M/S PCI interface", 0xFE00 : "FireWire Host Controller", 0xFE03 : "FireWire Host Controller", }, 0x104D : { 0x011B : "USB Ralink Wireless LAN", 0x5001 : "Sony Firmware Extension Parser listed as ACPI&#92;SNY5001 in device manager.", 0x6001 : "Sony Programmable I/O Control Device", 0x8009 : "PCI bus 9", 0x8039 : "OHCI i.LINK (IEEE 1394) PCI Host Ctrlr", 0x8056 : "Rockwell HCF 56K Modem", 0x8087 : "SONY MPEG ENCODER", 0x808A : "Memory Stick Controller", }, 0x104E : { 0x0017 : "", 0x0107 : "Spitfire VGA Accelerator", 0x0109 : "Video Adapter", 0x0217 : "", 0x0317 : "", 0x0611 : "T9732", 0x317 : "Spitfire VGA Accelerator", }, 0x104F : { 0x104F : "Multi I/O", }, 0x1050 : { 0x6692 : "PCI BusISDN S/T-Controllerry", 0x0000 : "Ethernet Controller (NE2000 compatible)", 0x0001 : "PCI/IDE controller", 0x0033 : "Winbond W89C33 mPCI 802.11 Wireless LAN Adapter", 0x0105 : "Ethernet Adapter", 0x0628 : "PCI to ISA Bridge Set", 0x0840 : "100/10Mbps Ethernet Controller", 0x0940 : "winbond pci ethernet", 0x1050 : "Video capture card mpeg-1", 0x5A5A : "ELANC-PCI Twisted-pair Ether-LAN Ctrlr", 0x6692 : "PCI BusISDN S/T-Controller", 0x8481 : "SD Host Controller", 0x9921 : "MPEG1 capture card", 0x9922 : "MPEG-1/2 Decoder", 0x9960 : "Video Codec", 0x9961 : "H.263/H.261 Video Codec", 0x9970 : "VGA controller", 0x9971 : "W9971CF", 6692 : "", }, 0x1051 : { 0x0100 : "", }, 0x1054 : { 0003 : "0003", 0x0001 : "PCI Bridge", 0x0002 : "PCI bus Cntrlr", 0x0003 : "hts547575a9e384", 0x3505 : "SuperH (SH) 32-Bit RISC MCU/MPU Series", }, 0x1055 : { 0x0810 : "EFAR 486 host Bridge", 0x0922 : "Pentium/p54c host Bridge", 0x0926 : "ISA Bridge", 0x9130 : "Ultra 
ATA/66 IDE Controller", 0x9460 : "Victory66 PCI to ISA Bridge", 0x9461 : "Victory66 UDMA EIDE Controller", 0x9462 : "Victory66 USB Host Controller", 0x9463 : "Victory66 Power Management Controller", 0xe420 : "PCI 10/100 Ethernet controller", }, 0x1056 : { 0x2001 : "Philips P89C51RD271BA. 1D041700A0. AeD0217G", }, 0x1057 : { 0x0001 : "PCI Bridge / Memory Controller (PCIB/MC)", 0x0002 : "PCI Bridge/Memory Controller (PCIB/MC)", 0x0003 : "Integrated Processor", 0x0004 : "PCI Bridge/Memory Controller for PPC", 0x0006 : "Integrated Processor", 0x0100 : "HCF-PCI", 0x0431 : "100VG Ethernet Controller", 0x1801 : "24-bit Digital Signal Processor", 0x1802 : "24-Bit Digital Signal Processor", 0x18C0 : "PowerQUICC II PCI Bridge", 0x3052 : "Motorola SM56 PCI Modem", 0x3055 : "Motorola SM56 Data Fax Modem", 0x3057 : "Modem Device on High Definition Audio Bus", 0x3410 : "Digital Signal Processor", 0x3421 : "Modem", 0x4801 : "PowerPC Chipset", 0x4802 : "Memory controller chipset", 0x4803 : "", 0x4806 : "", 0x4809 : "HotSwap Controller", 0x5600 : "SM 56 PCI Speakerphone/Data Modem", 0x5602 : "PCI Modem", 0x5608 : "Motorola SM56 Speakerphone Modem", 0x5803 : "32-Bit Embedded PowerPC Processor", 0x6400 : "Security Co-Processor", 0x9876 : "3052", }, 0x105A : { 0x0262 : "Ultra66/FastTrak66", 0x0D30 : "MBUltra100/MBFastTrak100 Lite", 0x0D38 : "FastTrak66 Lite EIDE Controller", 0x105A : "EIDE Controller", 0x1275 : "MBUltra133", 0x1960 : "SuperTrak 66/100 RAID", 0x1962 : "SuperTrak SX 6000", 0x3318 : "SATA150 TX4", 0x3319 : "FastTrak S150 TX4", 0x3371 : "FastTrak S150 TX2+", 0x3373 : "FastTrak 378/SATA 378 RAID Controller", 0x3375 : "SATA150 TX2+", 0x3376 : "FastTrak 376 Controller", 0x3515 : "FastTrak TX43xx", 0x3519 : "FastTrak TX42xx", 0x3570 : "FastTrak TX2300 SATA300 Controller", 0x3571 : "FastTrak TX2200", 0x3574 : "SATAII 150 579", 0x3d17 : "SATA 300 TX4 Controller", 0x3D18 : "SATAII 150TX2+/SATAII150 TX4", 0x3D73 : "SATAII 300 TX2+", 0x3F19 : "FastTrak TX2650/4650/4652", 0x3F20 : "FastTrak TX2650(3F21)/4650(3F22)/PDC42819(3716)", 0x4302 : "SuperTrak EX 43X0", 0x4303 : "SuperTrak EX 4350", 0x4D30 : "FastTrak100 on Intel MB SE7500CW2", 0x4D33 : "FastTrak/Ultra33 ATA RAID controller", 0x4D38 : "Ultra66/FastTrak66", 0x4D68 : "Ultra100TX2/FastTrak100TX/LP", 0x4D69 : "Ultra133TX2", 0x5275 : "MBUltra133/MBFastTrak133", 0x5300 : "EIDE Controller", 0x6268 : "FastTrak100 TX2/TX4/LP", 0x6269 : "FastTrak TX2000 EIDE controller", 0x6300 : "FastTrak SX 8300", 0x6301 : "FastTrak SX8300-1", 0x6302 : "FastTrak SX 4300", 0x6303 : "FastTrak SX 4", 0x6304 : "FastTrak SX8300-2", 0x6305 : "FastTrak SX8300-3", 0x6306 : "FastTrak SX 4300-2", 0x6307 : "FastTrak SX 4300-3", 0x6621 : "FastTrak SX4000", 0x6622 : "FastTrak S150SX4", 0x6629 : "FastTrak TX4000", 0x7250 : "Vitesse 7250 SAS RAID", 0x7275 : "SBUltra133/SBFastTrak 133 Lite", 0x8000 : "SATAII150 SX8", 0x8002 : "SATAII150 SX8", 0x8003 : "FastTrak SX4000", 0x8004 : "SATAII150 SX8", 0x8006 : "SATAII150 SX8", 0x8350 : "SuperTrak EX8350/16350/8300/16300", 0x8650 : "SuperTrak EX SAS RAID", 0xC350 : "SuperTrak EX 123X0", 0xE350 : "SuperTrak EX 243X0", }, 0x105D : { 0x2309 : "GUI Accelerator", 0x2339 : "Imagine 128 Series 2", 0x493D : "Revolution 3D", 0x5348 : "Revolution IV", }, 0x1060 : { 0x0001 : "486 Chipset", 0x0002 : "ISA Bridge", 0x0101 : "EIDE Controller", 0x0881 : "HB4 486 PCI Chipset", 0x0886 : "ISA Bridge", 0x0891 : "Pentium CPU to PCI bridge", 0x1001 : "IDE Cntrlr (dual function)", 0x673A : "EIDE Controller", 0x673B : 
"EIDE Master/DMA", 0x8710 : "VGA Cntrlr", 0x8821 : "CPU/PCI Bridge", 0x8822 : "PCI/ISA Bridge", 0x8851 : "Pentium CPU/PCI Bridge", 0x8852 : "Pentium CPU/ISA Bridge", 0x886A : "ISA Bridge with EIDE", 0x8881 : "HB4 486 PCI Chipset", 0x8886 : "ISA Bridge (w/o IDE support)", 0x888A : "", 0x8891 : "586 Chipset", 0x9017 : "Ethernet", 0x9018 : "Ethernet", 0x9026 : "Fast Ethernet", 0xE881 : "486 Chipset", 0xE886 : "ISA Bridge w/EIDE", 0xE88A : "PCI / ISA Bridge", 0xE891 : "um8891n", }, 0x1061 : { 0x0001 : "GUI Accelerator", 0x0002 : "MPEG Decoder", }, 0x1065 : { 0x8139 : "Realtek 8139C Network Card", }, 0x1066 : { 0x0000 : "VL Bridge", 0x0001 : "Vesuvius V1-LS System Controller", 0x0002 : "Vesuvius V3-LS ISA Bridge", 0x0003 : "Nile PCI to PCI Bridge", 0x0004 : "Nile-II PCI to PCI Bridge", 0x0005 : "System Controller", 0x8002 : "ISA Bridge", }, 0x1067 : { 0x1002 : "VolumePro Volume Rendering Accelerator", }, 0x106B : { 0x0001 : "PowerPC Host-PCI Bridge", 0x0002 : "I/O Controller", 0x0003 : "", 0x0004 : "Video-in", 0x0007 : "I/O Controller", 0x0009 : "BCM5703X", 0x000C : "", 0x000E : "Mac I/O Controller", 0x0010 : "Mac I/O Controller", 0x0017 : "Mac I/O Controller", 0x0018 : "FireWire Controller", 0x001F : "Host-PCI bridge", 0x0020 : "AGP interface", 0x0026 : "USB Interface", 0x0027 : "AGP interface", 0x002D : "AGP Bridge", 0x002E : "PCI Bridge", 0x002F : "Internal PCI", 0x0030 : "FireWire Controller", 0x003B : "Integrated ATA Controller", 0x004f : "Mac I/O controler", 0x0050 : "IDE controler", 0x0051 : "Sungem ethernet controler", 0x0052 : "Firewire controler", 0x0053 : "PCI Bridge", 0x0054 : "PCI Bridge", 0x0055 : "PCI Bridge", 0x0058 : "AGP Bridge", 0x008A : "Mac Pro RAID Card", 0x008C : "AirPort Extreme", }, 0x106C : { 0x8801 : "Dual Pentium ISA/PCI Motherboard", 0x8802 : "PowerPC ISA/PCI Motherboard", 0x8803 : "Dual Window Graphics Accelerator", 0x8804 : "PCI LAN Controller", 0x8805 : "100-BaseT LAN Controller", }, 0x106E : { 0x4362 : "Yukon PCI-E Gigabit Ethernet Controller (copper)", }, 0x1073 : { 0x0001 : "3D graphics Cntrlr", 0x0002 : "RPA3 3D-Graphics Controller", 0x0003 : "", 0x0004 : "PCI Audio Controller", 0x0005 : "DS1 Audio", 0x0006 : "DS1 Audio", 0x0008 : "DS1 Audio", 0x000A : "DS-1L PCI Audio Controller", 0x000C : "DS-1L PCI audio controller", 0x000D : "YamahaDS1 native audio ", 0x0010 : "DS-1 PCI audio controller", 0x0012 : "DS-1E PCI Audio Controller", 0x0020 : "DS-1 Audio", 0x1000 : "Sound system", 0x2000 : "Digital Mixing Card", 0x9876 : "yamaha", }, 0x1074 : { 0x4E78 : "Nx586 Chipset", }, 0x1077 : { 0x1016 : "Single Channel Ultra3 SCSI Processor", 0x1020 : "Fast-wide SCSI - Sparc PCI", 0x1022 : "Fast-wide SCSI", 0x1080 : "SCSI Host Adapter", 0x1216 : "Dual Channel Ultra3 SCSI Processor", 0x1240 : "SCSI Host Adapter", 0x1280 : "SCSI Host Adapter", 0x2020 : "Fast!SCSI Basic Adapter", 0x2100 : "64-bit Fibre Channel Adapter", 0x2200 : "PCI Fibre Channel Adapter", 0x2300 : "64-bit PCI FC-AL Adapter", 0x2312 : "Fibre Channel Adapter", 0x2422 : "QLogic PCI to Fibre Channel Host Adapter for QLA2460", 0x2432 : "4Gb PCI Single/Dual Fibre Channel HBA", 0x2532 : "8Gb PCIe x8 Single/Dual Fibre Channel HBA", 0x3010 : "n/a", 0x3032 : "QLOGIC Dual Port 1GBPS PCI-E HBA", 0x4000 : "", 0x4010 : "", 0x6312 : "Qlogic FC-HBA QLA200", 0x6422 : "4-Gbps Fibre Channel to PCI-X 2.0 266MHz controller for Embedded Applications", 0x6432 : "4-Gbps Fibre Channel to PCIe controller for Embedded Applications", 0x8000 : "QLE8142 QLogic PCI Express to 10 GbE Dual Channel CNA", 0x8001 : "QLE8142 QLogic PCI Express 
to 10 GbE Dual Channel CNA (FCoE)", 0x8020 : "QLogic Dual Port 10 Gigabit Ethernet CNA", 0x8021 : "QLogic [FCoE] Adapter", 0x8022 : "QLE8142 QLogic PCI Express to 10 GbE Dual Channel CNA (iSCSI)", }, 0x1078 : { 0x0000 : "ISA Bridge", 0x0001 : "Cyrix Integrated CPU", 0x0002 : "ISA Bridge", 0x0100 : "ISA bridge", 0x0101 : "SMI status and ACPI timer", 0x0102 : "IDE Controller", 0x0103 : "XpressAUDIO", 0x0104 : "Video Controller", 0x0400 : "CPU to PCI Bridge", 0x0401 : "Power Management Controller", 0x0402 : "IDE Controller", 0x0403 : "Expansion Bus", }, 0x1079 : { 0x10de : "zdzvz", }, 0x107D : { 0x0000 : "Graphic GLU-Logic", }, 0x107E : { 0x0001 : "FRED Local Bus I/F to PCI Peripheral", 0x0002 : "100 vg anylan Cntrlr", 0x0004 : "Fibre Channel Host Adapter", 0x0005 : "Fibre Channel Host Adapter", 0x0008 : "(i)chipSAR+ 155 MBit ATM controller", 0x9003 : "", 0x9007 : "", 0x9008 : "", 0x900C : "", 0x900E : "", 0x9011 : "", 0x9013 : "", 0x9023 : "", 0x9027 : "", 0x9031 : "", 0x9033 : "Adapter", 0x9060 : "CompactPCI T1/E1/J1Communications Ctrlr", 0x9070 : "PMC T1/E1/J1 Communications Controller", 0x9080 : "PMC ATM Over OC-3/STM-1 Comm Controller", 0x9081 : "PMC ATM Over OC-3/STM-1 Comm Controller", 0x9082 : "PMC ATM Over OC-3/STM-1 Comm Controller", 0x9090 : "PMC ATM Over T3/E3 Communications Ctrlr", 0x90A0 : "PMC Quad T1/E1/J1 Communications Ctrlr", }, 0x107F : { 0x0802 : "pinacale capture card", 0x0803 : "EIDE Bus Master Controller", 0x0806 : "EIDE Controller", 0x2015 : "EIDE Controller", }, 0x1080 : { 0x0600 : "CPU to PCI & PCI to ISA Bridge", 0xC691 : "AN2131QC 0230", 0xC693 : "PCI to ISA Bridge", }, 0x1081 : { 0x0D47 : "Radius PCI to NuBUS Bridge", }, 0x1083 : { 0x0001 : "PCI Enhanced IDE Adapter", 0x0613 : "PCI", }, 0x1085 : { 0x0001 : "Datalaster Interface for OBD automotive", }, 0x1087 : { 0x9200 : "", }, 0x1089 : { 0x5555 : "3249", }, 0x108A : { 0x0001 : "PCI-VME Bus Adapter", 0x0003 : "PCI to VME Bridge", 0x0010 : "VME Bridge", 0x0040 : "", 0x3000 : "VME Bridge", }, 0x108D : { 0x0001 : "Token-Ring 16/4 PCI Adapter", 0x0002 : "Fastload 16/4 PCI/III Token Ring Adapter", 0x0004 : "RapidFire Token Ring 16/4 Adapter", 0x0005 : "GoCard Token Ring 16/4 Adapter", 0x0006 : "RapidFire Token Ring 100 Adapter", 0x0007 : "RapidFire Token Ring 16/4 Adapter", 0x0008 : "RapidFire HSTR 100/16/4 Adapter", 0x000A : "RapidFire Token-Ring 16/4 PCI Adapter", 0x0011 : "Ethernet Controller", 0x0012 : "Ethernet PCI/II 10/100 Controller", 0x0013 : "PCI/II Ethernet Controller", 0x0014 : "Ethernet PCI/II 10/100 Controller", 0x0019 : "10/100 Ethernet Controller", 0x0021 : "155 Mbit ATM Adapter", 0x0022 : "ATM Adapter", }, 0x108E : { 0x0001 : "", 0x1000 : "PCI Input/Output Controller", 0x1001 : "Happy Meal Ethernet", 0x1100 : "", 0x1101 : "", 0x1102 : "", 0x1103 : "", 0x2BAD : "Sun Gigabit Ethernet Card", 0x5000 : "UltraSPARC-IIi Advanced PCI Bridge", 0x5043 : "Co-processor", 0x7063 : "PCI card with Intel or AMD processor", 0x8000 : "UPA to PCI Interface (UPA)", 0x8001 : "PCI Bus Module", 0xA000 : "Sabre", 0xA001 : "Hummingbird", 0xabba : "10/100/1000 Ethernet adapter", }, 0x1091 : { 0x0020 : "3D Graphics Processor", 0x0021 : "3D graphics processor w/texturing", 0x0040 : "3D graphics frame buffer", 0x0041 : "3D graphics frame buffer", 0x0060 : "Proprietary bus Bridge", 0x00E4 : "", 0x0720 : "Motion JPEG Codec", }, 0x1092 : { 0x00A0 : "GUI Accelerator", 0x00A8 : "GUI Accelerator", 0x0550 : "", 0x08D4 : "WinModem", 0x094C : "SupraExpress 56i Pro", 0x09C8 : "SupraExpress 56i Pro VCC", 0x1002 : "RS56-pci", 0x1092 : 
"2710a", 0x6120 : "DVD", 0x8810 : "GUI Accelerator", 0x8811 : "GUI Accelerator", 0x8880 : "", 0x8881 : "GUI Accelerator", 0x88B0 : "GUI Accelerator", 0x88B1 : "GUI Accelerator", 0x88C0 : "GUI Accelerator", 0x88C1 : "GUI Accelerator", 0x88D0 : "GUI Accelerator", 0x88D1 : "GUI Accelerator", 0x88F0 : "GUI Accelerator", 0x88F1 : "GUI Accelerator", 0x9876 : "Supra Express 56i Pro CW #2", 0x9999 : "Diamand Technology DT0398", }, 0x1093 : { 0x0160 : "data adquisition input and output", 0x0161 : "Multifunction data acquisition board", 0x0162 : "24MIO 6-03-2", 0x1150 : "High Speed Digital I/O Board", 0x1170 : "", 0x1180 : "base system device", 0x1190 : "", 0x11B0 : "", 0x11C0 : "", 0x11D0 : "", 0x11E0 : "", 0x1270 : "Multifunction Data Acquisition Card", 0x12b0 : "High Speed DIO", 0x1310 : "Data Acquisition Device", 0x1320 : "", 0x1330 : "", 0x1340 : "Multifunction Data Acquisition Card", 0x1350 : " NI PCI-6071E Multifunction I/O & NI-DAQ", 0x1360 : "", 0x14e0 : "PCI611X", 0x17D0 : "", 0x18B0 : "", 0x28b0 : "I/O Terminal NI-DAQ (Legacy) and NI-DAQmx", 0x2A60 : "", 0x2A70 : "Multifunction Data Acquisition Card", 0x2A80 : "Multifunction Data Acquisition Card", 0x2B20 : "", 0x2C80 : "", 0x2CA0 : "PCI-6034E", 0x702C : "NI FPGA Modul", 0x70af : "16-Bit", 0x70b8 : "Multifunction DAQ Device", 0x70E3 : "NI PXI-8431/8 (RS485/RS422)", 0x70E4 : "NI PCI-8430/8 (RS-232) Interface", 0x710e : "GPIB Controller Interface Board", 0x7146 : "NI-PCI-6132", 0x71BC : "16-Bit", 0x7414 : "NI PCIe-GPIB+ GPIB with analyzer", 0xB001 : "", 0xB011 : "", 0xB021 : "", 0xB031 : "", 0xB041 : "1pcs", 0xB051 : "", 0xB061 : "", 0xB071 : "IMAQ-PCI-1422", 0xB081 : "", 0xB091 : "bluethooth", 0xC4C4 : "NI PCIe-1433 extended (deca) Camera Link frame grabber", 0xC801 : "GPIB Controller Interface Board", 0xC811 : "", 0xC821 : "", 0xC831 : "PCI-GPIB", 0xC840 : "", 0xd130 : "2-port RS-232 Serial Interface Board", }, 0x1095 : { 0x0240 : "SIL3112", 0x0242 : "SIL3132", 0x0244 : "SIL3132", 0x0640 : "PCI0640A/B", 0x0641 : "pci0640", 0x0642 : "PCI0642", 0x0643 : "PCI0643", 0x0646 : "CMD646", 0x0647 : "PCI0647", 0x0648 : "PCI-648", 0x0649 : "PCI-649", 0x0650 : "PBC0650A", 0x0670 : "USB0670", 0x0673 : "USB0673", 0x0680 : "SiI 0680/680A", 0x1025 : "PCI0647", 0x1392 : "1390/1392", 0x2455 : "SI3124", 0x3112 : "SIL3112", 0x3114 : "Sil 3114", 0x3124 : "SiI 3124", 0x3132 : "SiI 3132", 0x3512 : "Sil 3512", 0x3531 : "3531", 0x9876 : "0x9876", }, 0x1096 : { 0x1106 : "Realtek AC97 Audio for VIA (R) Audio Controller", 0x3059 : "South Bridge", }, 0x1097 : { 0x0038 : "EIDE Controller (single FIFO)", }, 0x1098 : { 0x0001 : "EIDE Controller", 0x0002 : "EIDE Controller", }, 0x109A : { 0x8280 : "4 channel video digitizer cardm", }, 0x109E : { 0x0350 : "rb8701.1", 0x0350 : "tv tuner driverhj", 0x0351 : "BrookTree Bt848 Video Capture Device - Audio Section PCI", 0x0369 : "Video Capture", 0x036C : "", 0x036E : "AVerMediaAverTV WDM AudioCapture (878)", 0x036E : "Video Capture", 0x036E : "Video Capture", 0x036F : "Video Capture", 0x0370 : "Video Capture (10 bit High qualtiy cap)", 0x0878 : "Multimedia Controller Conexant Fusion 878A 25878-13 0549Y1JF", 0x0879 : "Video Capture (Audio Section)", 0x0880 : "Video Capture (Audio Section)", 0x109E : "Multimedia Video Controllerm", 0x109E : "0400 video devce", 0x2115 : "BtV Mediastream Controller 9x", 0x2125 : "BtV Mediastream Controller", 0x2164 : "Display Adapter", 0x2165 : "MediaStream Controller", 0x36e : "25878-13", 0x8230 : "ATM Segment/Reassembly Controller (SRC)", 0x8472 : "32/64-channel HDLC Controllers", 0x8474 : 
"128-channel HDLC Controller", }, 0x109F : { 0x036F : "Video Capturee", }, 0x10A4 : { 0X5969 : "", }, 0x10A8 : { 0x0000 : "ethernet controller", }, 0x10A9 : { 0x0004 : "", 0x0005 : "", 0x0006 : "", 0x0007 : "", 0x0008 : "", 0x0009 : "Gigabit Ethernet", 0x0010 : "Video I/O", 0x0011 : "", 0x0012 : "", 0x1001 : "", 0x1002 : "", 0x1003 : "", 0x1004 : "", 0x1005 : "", 0x1006 : "", 0x1007 : "", 0x1008 : "", 0x2001 : "Fibre Channel", 0x2002 : "", 0x8001 : "", 0x8002 : "", }, 0x10AB : { 0x1005 : "USB Pendrive", 0x1007 : "usb pendrive", 0x8086 : "PCI Simple Communications Controller ", }, 0x10AD : { 0x0001 : "EIDE Ctrlr", 0x0103 : "PCI-ide mode 4.5 Cntrlr", 0x0105 : "Sonata bus master PCI-IDE controller", 0x0565 : "PCI/ISA bridge", }, 0x10B5 : { 0x0324 : "", 0x0480 : "Integrated PowerPC I/O Processor", 0x0960 : "PCI Reference Design Kit for PCI 9080", 0x1030 : "ISDN card", 0x1054 : "dual channel ISDN card", 0x1078 : "Vision Systems VScom PCI-210", 0x1103 : "Vision Systems VScom PCI-200", 0x1146 : "Vision Systems VScom PCI-010S", 0x1147 : "Vision Systems VScom PCI-020S", 0x1151 : "ISDN card", 0x1152 : "ISDN card", 0x2724 : "Thales PCSM Security Card", 0x2748 : "TPCX Transientrecorder Card", 0x3001 : "gpscard", 0x5406 : "PCI Reference Design Kit for PLX PCI 9054", 0x5601 : "32-bit; 66MHz PCI Bus Master I/O Accelerator", 0x6520 : "PCI-X to PCI-X Bridge", 0x6ACC : "General Mechatronics 6 Axis Motion Control Card for EMC2", 0x8111 : "1 Lane PCI Express to PCI bridge (PEX8111); 1 Lane PCI Express to Generic Local Bus bridge (PEX8311)", 0x8112 : "1 Lane PCI Express to PCI bridge", 0x8508 : "8 Lane", 0x8509 : "8-lane PCI-Express Switch", 0x8516 : "Versatile PCI Express Switch", 0x8518 : "PLX PCI-e switch", 0x8532 : "Versatile PCI Express Switch", 0x8548 : "48-lane PCIe switch", 0x8609 : "8 Lane", 0x8664 : "64-Lane", 0x8748 : "48-Lane", 0x9030 : "PCI SMARTarget I/O Accelerator", 0x9036 : "Interface chip - value 1k", 0x9050 : "Target PCI Interface Chip - value 1k", 0x9052 : "PCI 9052 Target PLX PCI Interface Chip", 0x9054 : "PCI I/O Accelerator", 0x9056 : "32-bit", 0x9060 : "PCI Bus Master Interface Chip", 0x906D : "PCI Bus Master Interface Chip", 0x906E : "PCI Bus Master Interface Chip", 0x9080 : "High performance PCI to Local Bus chip", }, 0x10B6 : { 0x0001 : "Ringnode (PCI1b)", 0x0002 : "Ringnode (PCIBM2/CardBus)", 0x0003 : "Ringnode", 0x0004 : "Smart 16/4 Ringnode Mk1 (PCIBM1)", 0x0006 : "16/4 CardBus Adapter (Eric 2)", 0x0007 : "", 0x0009 : "Smart 100/16/4 PCi-HS Ringnode", 0x000A : "Smart 100/16/4 PCI Ringnode", 0x000B : "16/4 CardBus Adapter Mk2", 0x1000 : "ATM adapter", 0x1001 : "ATM adapter", 0x1002 : "ATM Adapter", }, 0x10B7 : { 0x0001 : "1000BaseSX Gigabit Etherlink", 0x0013 : "3Com11a/b/g Wireless PCI Adapter ", 0x1000 : "3COM 3C905CX-TXNM with 40-0664-003 ASIC", 0x1006 : "Broadcom Corporation NetXtreme BCM5701 Gigabit Ethernet", 0x1007 : "V.90 Mini-PCI Modem", 0x1700 : "Gigabit Ethernet PCI CODEC", 0x1F1F : "AirConnect Wireless LAN PCI Card", 0x3390 : "Token Link Velocity", 0x3590 : "TokenLink Velocity XL Adapter", 0x4500 : "Cyclone", 0x5055 : "Laptop Hurricane", 0x5057 : "Megahertz 10/100 LAN CardBus PC Card", 0x5157 : "Megahertz 10/100 LAN CardBus PC Card", 0x5257 : "Cyclone Fast Ethernet CardBus PC Card", 0x5900 : "Ethernet III Bus Fast PCI", 0x5920 : "PCI/EISA 10Mbps Demon/Vortex", 0x5950 : "100MB PCI Ethernet Adapter", 0x5951 : "Fast EtherLink PCI T4", 0x5952 : "Fast EtherLink PCI MII", 0x5970 : "PCI/EISA Fast Demon/Vortex", 0x5B57 : "Megahertz 10/100 LAN CardBus", 0x6055 : "10/100 Fast 
Ethernet MiniPCI Adapter", 0x6056 : "MiniPCI 10/100 Ethernet+Modem56k (see devid:1007)", 0x6560 : "Cyclone CardBus PC Card", 0x6561 : "10/100 LAN+56K Modem CardBus PC Card", 0x6562 : "Cyclone CardBus PC Card", 0x6563 : "10/100 LAN+56K Modem CardBus PC Card", 0x6564 : "Cyclone CardBus PC Card", 0x6565 : "Global 10/100 Fast Ethernet+56K Modem", 0x7646 : "3com", 0x7770 : "AirConnect Wireless PCI", 0x8811 : "Token Ring", 0x9000 : "Fast Etherlink PCI TPO NIC", 0x9001 : "Fast Etherlink XL PCI Combo NIC", 0x9004 : "EtherLink XL TPO 10Mb", 0x9005 : "Fast Etherlink 10Mbps Combo NIC", 0x9006 : "EtherLink XL TPC", 0x900A : "EtherLink PCI Fiber NIC", 0x9041 : "Fast Etherlink XL 10/100", 0x9050 : "Fast Etherlink XL PCI 10/100", 0x9051 : "Fast Etherlink XL 10/100", 0x9055 : "Fast Etherlink 10/100 PCI TX NIC", 0x9056 : "Fast EtherLink XL 10/100", 0x9058 : "Deluxe EtherLink 10/100 PCI Combo NIC", 0x905A : "Fast EtherLink 100 Fiber NIC", 0x9200 : "3Com 10/100 Managed NIC 3C905CX-TX-M", 0x9201 : "Integrated Fast Ethernet Controller", 0x9202 : "3C920B-EMB 3Com + Realtek 8201L", 0x9210 : "Integrated Fast Ethernet Controller", 0x9300 : "3ComSOHO100B-TX", 0x9800 : "Fast EtherLink XL Server Adapter2", 0x9805 : "Python-T 10/100baseTX NIC", 0x9876 : "3C920B-EMB 3Com + Realtek 8201L", 0x9902 : "EtherLink 10/100 PCI with 3XP Processor", 0x9903 : "EtherLink 10/100 PCI with 3XP Processor", 0x9905 : "100FX PCI Server NIC w/3XP", 0x9908 : "EtherLink 10/100 Server PCI with 3XP", 0x9909 : "EtherLink 10/100 Server PCI with 3XP", 0xD004 : "EtherLink XL PCI", }, 0x10B8 : { 0x0005 : "EPIC/XF 10/100 Mbps Fast Ethernet Ctrlr", 0x0006 : "EPIC/C Ethernet CardBus Integrated Ctrlr", 0x1000 : "FDC", 0x1001 : "FDC", 0xA011 : "Fast ethernet controller", 0xB106 : "CardBus Controller", }, 0x10B9 : { 0x0101 : "PCI Audio Device (OEM)", 0x0102 : "PCI Audio Device (OEM)", 0x0111 : "C-Media Audio Device (OEM)", 0x0780 : "Multi-IO Card", 0x0782 : "Multi-IO Card", 0x10b9 : "0402t505 CK46828100B", 0x10CE : "cpi", 0x1435 : "VL Bridge", 0x1445 : "CPU to PCI & PCI to ISA Bridge w/EIDE", 0x1449 : "ISA Bridge", 0x1451 : "Pentium CPU to PCI Bridge", 0x1461 : "P54C Chipset", 0x1489 : "486 PCI Chipset", 0x1511 : "Aladdin 2 Host Bridge", 0x1513 : "Aladdin 2 South Bridge", 0x1521 : "Bios", 0x1523 : "ISA Bridge", 0x1533 : "PCI South Bridge", 0x1535 : "ISA Bridge", 0x1541 : "Aladdin V AGPset Host Bridge", 0x1543 : "Aladdin V chipset South Bridge", 0x1561 : "North Bridge", 0x1563 : "South Bridge with Hypertransport Support", 0x1632 : "North Bridge", 0x1641 : "CPU to PCI Bridge", 0x1644 : "AGP System Controller", 0x1646 : "AGP System Controller", 0x1647 : "CPU to PCI Bridge", 0x1651 : "CPU to PCI Bridge", 0x1661 : "AGP System Controller", 0x1667 : "AGP System Controller", 0x1671 : "Super P4 Nouth Bridge", 0x1672 : "AGP System Controller", 0x1681 : "P4 Nouth Bridge with HyperTransport", 0x1687 : "K8 North Bridge with HyperTransport", 0x1849 : "023&267A616A", 0x3141 : "GUI Accelerator", 0x3143 : "GUI Accelerator", 0x3145 : "GUI Accelerator", 0x3147 : "GUI Accelerator", 0x3149 : "GUI Accelerator", 0x3151 : "GUI Accelerator", 0x3307 : "MPEG-1 Decoder", 0x3309 : "MPEG Decoder", 0x3432 : "131312", 0x5212 : "", 0x5215 : "EIDE Ctrlr", 0x5217 : "I/O (?)", 0x5219 : "Ali M5219 PCI BUS MASTER IDE Controller", 0x5225 : "IDE Controller", 0x5228 : "M5228 PATA/RAID Controller", 0x5229 : "EIDE Controller", 0x5229 : "Ali EIDE", 0x5229 : "PATA 33", 0x5229 : "PATA 66", 0x5229 : "PATA 100", 0x5229 : "PATA 133", 0x5235 : "ALI M6503c", 0x5236 : "EHCI USB 2.0", 0x5237 : "OpenHCI 
1.1 USB to 2.0", 0x5239 : "USB EHCI2.0 Controller", 0x5249 : "HyperTransport to PCI Bridge", 0x5251 : "IEEE P1394 OpenHCI 1.0 Controller", 0x5253 : "IEEE P1394 OpenHCI 1.0 Controller", 0x5261 : "Ethernet Controller", 0x5263 : "ULi PCI Fast Ethernet Controller", 0x528 : "023&267A616A", 0x5281 : "ALI M5281/5283 SATA/RAID Controller", 0x5286 : "Realtek PCIE CardReader", 0x5287 : "SATA/Raid controller", 0x5288 : "M5288 SATA/Raid controller (Asrock 939SLI32-eSata2)", 0x5289 : "M5289 SATA/Raid controller", 0x5450 : "Agere Systems AC97 Modem", 0x5451 : "Ali Audio Accelerator", 0x5455 : "AC'97 Audio Controller", 0x5457 : "AC97 Modem controller", 0X5459 : "PCI Soft Modem V92 NetoDragon", 0x5461 : "High Definition Audio Controller", 0x5471 : "Memory Stick Host", 0x5473 : "MMC/SD controller", 0x7101 : "Power Management Controller", 0x7471 : "Memory Stick Host", 0x9876 : "xhcth700000b", }, 0x10BA : { 0x0304 : "GUI Accelerator", }, 0x10BD : { 0x0803 : "Ethernet PCI Adapter", 0x0E34 : "Ethernet Adapter (NE2000 PCI clone)", 0x5240 : "IDE Cntrlr", 0x5241 : "PCMCIA Bridge", 0x5242 : "General Purpose Cntrlr", 0x5243 : "Bus Cntrlr", 0x5244 : "FCD Cntrlr", 0x8136 : "Unkown", 0x8139 : "realtek 8139c", }, 0x10C3 : { 0x8920 : "MCP67 High Definition Audio ", 0x8925 : "", }, 0x10C4 : { 0x8363 : "10C4", 0xEA60 : "Silicon Labs CP210x USB to UART Bridge", }, 0x10C8 : { 0004 : "MagicGraph 128XD", 0x0000 : "Graphics Cntrlr", 0x0003 : "MagicGraph 128ZV Video Controller", 0x0004 : "MagicGraph 128XD", 0x0005 : "MagicMedia 256AV", 0x0006 : "MagicMedia 256ZX/256M6D", 0x0016 : "MagicMedia 256XL+", 0x0025 : "MagicMedia 256AV+", 0x0083 : "Graphic Controller NeoMagic MagicGraph128ZV+", 0x8005 : "MagicMedia 256AV Audio Device", 0x8006 : "MagicMedia 256ZX Audio Device", 0x8016 : "MagicMedia 256XL+ Audio Device", }, 0x10CA : { 0x9876 : "PCIVEN_1217&DEV_7110&SUBSYS_106A1734&REV_00", }, 0x10CD : { 0x1100 : "PCI SCSI Host Adapter", 0x1200 : "Fast SCSI-II", 0x1300 : "ABP-3925", 0x2300 : "PCI Ultra Wide SCSI Host Adapter", 0x2500 : "PCI Ultra 80/160 SCSI Controllers", 0x4000 : "IEEE-1394 OHCI PCI Controller", }, 0x10CF : { 0x10C5 : "Serial Parallel Card", 0x2001 : "PCI SCSI Host Adapter (Fast Wide SCSI-2)", 0x2002 : "Fast Wide SCSI Controller", 0x2005 : "10/100 Fast Ethernet Adapter", 0x200C : "IEEE1394 OpenHCI Controller", 0x2010 : "OHCI FireWire Controller", 0x2011 : "MPEG2 R-Engine (MPEG2 Hardware Encoder)", 0x2019 : "Coral-P Graphics Chip", 0x201E : "Coral-PA Graphics Chip", 0x202A : "u/k", 0x202B : "Carmine Graphisc adapter", }, 0x10D6 : { 0xFF51 : "ATJ2091N", 0xff66 : "ATJ2091N", }, 0x10D9 : { 0x0066 : "sdas", 0x0512 : "Fast Ethernet Adapter", 0x0531 : "Single Chip Fast Ethernet NIC Controller", 0x0532 : "PCI/CardBus Fast Ethernet Controller", 0x0553 : "Ethernet Adapter", 0x8625 : "xiankasqudong", 0x8626 : "PCIVEN_10D9&DEV_8626&SUBSYS_00000000&REV_004&1F7DBC9F&0&08F0 ", 0x8627 : "Voodoo Rush MX86251", 0x8888 : "9619E", 0xC115 : " Linksys LNE100TX ", }, 0x10DC : { 0x0001 : "PCI-SCI PMC mezzanine", 0x0002 : "SCI bridge on PCI 5 Volt card", 0x0004 : "ALTERA STRATIX", 0x0010 : "Simple PMC/PCI to S-LINK interface", 0x0011 : "Simple S-LINK to PMC/PCI interface", 0x0012 : "32-bit S-LINK to 64-bit PCI interface", 0x0021 : "HIPPI destination", 0x0022 : "HIPPI source", 0x0033 : "ALICE DDL to PCI interface (RORC)", 0x0101 : "Acquisition card for the SPS Orbit System (MACI)", 0x016A : "CALICE ODR", 0x10DC : "TTC sr first TTC chip receiver PMC", 0x301 : "based on the PLX PCI 9030 to build a MIL1553 bus interface", 0x324 : "64 Bit/66MHz PCI to 
Local Bus Bridge", 0x8086 : "geodelink pci south", }, 0x10DD : { 0x0001 : "3D graphics processor", }, 0x10DE : { 0x04EF : "Riva 128", 0x0001 : "SoundMAX Integrated Digital Audio", 0x0002 : "HDMI Audio Driver Driver", 0x0003 : "nVIDIA High Definition Audio/HDMI ", 0x0006 : "realtek based HD Audio", 0x0008 : "Edge 3D", 0x0009 : "Edge 3D", 0x000B : "HDMI Audio Driver Driver 1.00.00.59", 0x0010 : "Mutara V08", 0x0011 : "NVIDIA High Def Audio", 0x0018 : "Riva 128", 0x0019 : "Riva 128ZX", 0x001D : "nVidia GeForce FX 5900XT", 0x0020 : "NVIDIA RIVA TNT", 0x0028 : "MCP67 ", 0x0028 : "ACPINSC1200", 0x0029 : "NVIDIA RIVA TNT 2 Ultra", 0x002A : "TNT2", 0x002B : "Riva TNT2", 0x002C : "NVIDIA Vanta/Vanta LT", 0x002D : "NVIDIA RIVA TNT2 Model 64/Model 64 AGP 32M", 0x002E : "VANTA", 0x002F : "VANTA", 0x0035 : "MCP04 PATA Controller", 0x0036 : "MCP04 SATA/RAID Controller", 0x003E : "MCP04 SATA/RAID Controller", 0x0040 : "NVIDIA GeForce 6800 Ultra", 0x0041 : "NVIDIA GeForce 6800", 0x0042 : "NVIDIA GeForce 6800 LE", 0x0043 : "NVIDIA GeForce 6800 XE", 0x0044 : "NVIDIA GeForce 6800 XT", 0x0045 : "NVIDIA GeForce 6800 GT", 0x0046 : "NVIDIA GeForce 6800 GT", 0x0047 : "NVIDIA GeForce 6800 GS", 0x0048 : "NVIDIA GeForce 6800 XT", 0x0049 : "NVIDIA NV40GL", 0x004D : "NVIDIA Quadro FX 3400", 0x004E : "NVIDIA Quadro FX 4000", 0x0052 : "NVIDIA nForce PCI System Management", 0x0053 : "CK804 PATA Controller", 0x0054 : "CK804 SATA/RAID Controller", 0x0055 : "CK804 SATA/RAID Controller", 0x0057 : "NVIDIA Network Bus Enumerator", 0x0059 : "nForce Audio Controller", 0x005E : "nForce4 HyperTransport Bridge", 0x0060 : "PCI to ISA Bridge", 0x0064 : "SMBus Controller", 0x0065 : "PATA Controller", 0x0066 : "nForce 2 Networking Controller", 0x0067 : "Nvidia 7050 chipset HDMI Audio", 0x0068 : "EHCI USB 2.0 Controller", 0x006A : "nForce AC97s", 0x006B : "Audio Processing Unit (Dolby Digital)", 0x006C : "PCI to PCI Bridge", 0x006D : "Audio Codec Interface", 0x006E : "OHCI Compliant IEEE 1394 Controller", 0x0085 : "MCP2S PATA Controller", 0x008C : "Single-Port 10/100M Fast Ethernet PHYceiver", 0x008E : "MCP2S SATA/RAID Controller", 0x0090 : "NVIDIA GeForce 7800 GTX", 0x0091 : "NVIDIA GeForce 7800 GTX", 0x0092 : "NVIDIA GeForce 7800 GT", 0x0093 : "NVIDIA GeForce 7800 GS", 0x0094 : "NVIDIA GeForce 7800SE/XT/LE/LT/ZT", 0x0095 : "NVIDIA GeForce 7800 SLI", 0x0098 : "NVIDIA GeForce Go 7800", 0x0099 : "NVIDIA GeForce Go 7800 GTX", 0x009C : "NVIDIA Quadro FX 350M", 0x009D : "NVIDIA Quadro FX 4500", 0x009E : "NVIDIA G70GL", 0x00A0 : "Aladdin TNT2", 0x00C0 : "NVIDIA GeForce 6800 GS", 0x00C1 : "NVIDIA GeForce 6800", 0x00C2 : "NVIDIA GeForce GTX670M", 0x00C3 : "NVIDIA GeForce 6800 XT", 0x00C8 : "NVIDIA GeForce Go 6800", 0x00C9 : "NVIDIA GeForce Go 6800 Ultra", 0x00CC : "NVIDIA Quadro FX Go 1400", 0x00CD : "NVIDIA Quadro FX 3450/4000 SDI", 0x00CE : "NVIDIA Quadro FX 1400", 0x00D0 : "LPC Bridge", 0x00D1 : "Host Bridge", 0x00D2 : "PCI-to-PCI Bridge", 0x00D4 : "SMBus Controller", 0x00D5 : "CK8 PATA 133/PATA to SATA Bridge", 0x00D6 : "nForce 3 Networking Controller", 0x00D7 : "OpenHCD USB Host Controller", 0x00D8 : "Enhanced PCI to USB Host Controller", 0x00D9 : "Agere System PCI Soft Modem", 0x00DA : "AC97 Audio Controller", 0x00DD : "PCI-to-PCI Bridge", 0x00DF : "nForce3 Networking Controller", 0x00E0 : "LPC Interface Bridge", 0x00E1 : "Host/PCI Bridge", 0x00E2 : "AGP Host to PCI Bridge", 0x00E3 : "CK8S SATA/RAID Controller", 0x00E4 : "PCI System Management", 0x00E5 : "Parallel ATA Controller", 0x00E7 : "OpenHCD USB Controller", 0x00E8 : "Enhanced PCI 
to USB Controller", 0x00EA : "Audio Codec Interface (Realtek ALC658)", 0x00ED : "PCI-PCI Bridge", 0x00EE : "CK8S SATA/RAID Controller", 0x00F0 : "NVIDIA Device", 0x00F1 : "NVIDIA GeForce 6600 GT", 0x00F2 : "NVIDIA GeForce 6600", 0x00F3 : "NVIDIA GeForce 6200", 0x00F4 : "NVIDIA GeForce 6600 gt", 0x00F5 : "NVIDIA GeForce 7800 GS", 0x00F6 : "NVIDIA GeForce 6800 GS/XT", 0x00F8 : "NVIDIA Quadro FX 3400/4400", 0x00F9 : "NVIDIA GeForce 6800 Series GPU", 0x00FA : "NVIDIA GeForce PCX 5750", 0x00FB : "NVIDIA GeForce PCX 5900", 0x00FC : "NVIDIA GeForce PCX 5300", 0x00FD : "NVIDIA Quadro PCI-E Series", 0x00FE : "NVIDIA Quadro FX 1300", 0x00FF : "NVIDIA GeForce PCX 4300", 0x0100 : "HDAUDIOFUNC_01&VEN_10EC&DEV_0269&SUBSYS_11790644&REV_10024&1422899C&0&0001", 0x0101 : "NVIDIA GeForce DDR", 0x0102 : "GeForce 256 Ultra", 0x0103 : "NVIDIA Quadro", 0x0110 : "NVIDIA GeForce2 MX/MX 400", 0x0111 : "NVIDIA GeForce2 MX 100/200", 0x0112 : "NVIDIA GeForce 9800gt", 0x0112 : "Nvidia GeForce2 Go/MX Ultra Video Adapter", 0x0113 : "NVIDIA Quadro2 MXR/EX", 0x0140 : "NVIDIA GeForce 6600 GT", 0x0141 : "nVIDIA GeForce 6600 PCI-E Video Adapter", 0x0142 : "NVIDIA GeForce 6600 LE", 0x0143 : "NVIDIA GeForce 6600 VE", 0x0144 : "NVIDIA GeForce Go 6600", 0x0145 : "NVIDIA GeForce 6610 XL", 0x0146 : "NVIDIA GeForce Go 6200 TE/6600 TE", 0x0147 : "NVIDIA GeForce 6700 XL", 0x0148 : "NVIDIA GeForce Go 6600", 0x0149 : "NVIDIA GeForce Go 6600 GT", 0x014A : "NVIDIA Quadro NVS 440", 0x014B : "NVIDIA NV43", 0x014C : "NVIDIA Quadro FX 540M", 0x014D : "NVIDIA Quadro FX 550", 0x014E : "NVIDIA Quadro FX 540", 0x014F : "NVIDIA GeForce 6200 go", 0x0150 : "NVIDIA GeForce2 GTS/GeForce2 Pro", 0x0151 : "NVIDIA GeForce2 Ti", 0x0152 : "NVIDIA GeForce2 Ultra", 0x0153 : "NVIDIA Quadro2 Pro", 0x016 : "1", 0x0160 : "NVIDIA GeForce 6500 ", 0x0161 : "NVIDIA GeForce 6200 TurboCache(TM)", 0x0162 : "NVIDIA GeForce 6200SE TurboCache(TM)", 0x0163 : "NVIDIA GeForce 6200 LE", 0x0164 : "NVIDIA NV44", 0x0165 : "NVIDIA Quadro NVS 285", 0x0166 : "NVIDIA GeForce Go 6250", 0x0167 : "NVIDIA GeForce Go 6200", 0x0168 : "NVIDIA GeForce Go 6400", 0x0169 : "NVIDIA GeForce 6250", 0x016a : "NVIDIA GeForce 7100 GS", 0x016B : "NVIDIA NV44GLM", 0x016C : "NVIDIA NV44GLM", 0x016D : "NVIDIA NV44GLM", 0x016E : "NVIDIA NV44GL", 0x0170 : "NVIDIA GeForce4 MX 460", 0x0171 : "NVIDIA GeForce4 MX 440 with AGP 4X 64mb", 0x0172 : "NVIDIA GeForce4 MX 420", 0x0173 : "NVIDIA GeForce4 MX 440-SE", 0x0174 : "NVIDIA GeForce4 MX 440 Go", 0x0175 : "NVIDIA GeForce4 MX 420 Go", 0x0176 : "NVIDIA GeForce4 MX 420 Go 32M", 0x0177 : "NVIDIA GeForce4 460 Go", 0x0178 : "NVIDIA Quadro4 550 XGL", 0x0179 : "NVIDIA GeForce4 MX 440 Go 64M", 0x017A : "NVIDIA Quadro NVS", 0x017B : "Quadro4 550 XGL", 0x017C : "NVIDIA Quadro4 500 Go GL", 0x017D : "NVIDIA GeForce4 410 Go 16M", 0x0181 : "NVIDIA GeForce4 MX 440 with AGP8X", 0x0182 : "NVIDIA GeForce4 MX 440SE with AGP8X", 0x0183 : "NVIDIA GeForce4 MX 420 with AGP8X", 0x0185 : "NVIDIA GeForce4 MX 4000 128 mb 64 bit", 0x0186 : "NVIDIA GeForce4 448 Go", 0x0187 : "NVIDIA GeForce4000 Go", 0x0188 : "NVIDIA Quadro4 580 XGL", 0x018A : "NVIDIA Quadro NVS with AGP8X", 0x018B : "NVIDIA Quadro4 380 XGL", 0x018C : "NVIDIA Quadro NVS 50 PCI", 0x018D : "NVIDIA GeForce4 448 Go", 0x0191 : "NVIDIA GeForce 8800 GTX", 0x0193 : "NVIDIA GeForce 8800 GTS", 0x0194 : "NVIDIA GeForce 8800 Ultra", 0x0197 : "NVIDIA Tesla C870", 0x019D : "NVIDIA Quadro FX 5600", 0x019E : "NVIDIA Quadro FX 4600", 0x01A0 : "NVIDIA GeForce2 Integrated GPU", 0x01A4 : "AGP Controller", 0x01A5 : "AGP Controller", 0x01A6 : "AGP 
Controller", 0x01A8 : "Memory Controller (SDR) ddr3", 0x01A9 : "Memory Controller (SDR)", 0x01AA : "Memory Controller (DDR)", 0x01AB : "Memory Controller (DDR)", 0x01AC : "Memory Controller", 0x01AD : "Memory Controller", 0x01B0 : "nForce Dolby Digital Audio Controller", 0x01B1 : "nForce AC'97 Audio Controller", 0x01B2 : "HUB Interface", 0x01B4 : "nForce 1/2 SMBus Controller", 0x01B7 : "AGP Bridge", 0x01B8 : "PCI Bridge", 0x01BC : "nForce IDE/ATA Controller", 0x01C1 : "AC97 Modem", 0x01C2 : "OHCI USB Controller", 0x01C3 : "nForce Networking Controller", 0x01D0 : "NVIDIA GeForce 7350 LE", 0x01D1 : "NVIDIA GeForce 7300 LE", 0x01D2 : "NVIDIA GeForce 7550 LE", 0x01D3 : "NVIDIA GeForce 7300 SE/7200 GS", 0x01D5 : "NVIDIA GeForce 7300 LE", 0x01D7 : "NVIDIA GeForce Go 7300", 0x01D8 : "NVIDIA GeForce Go 7400", 0x01DB : "NVIDIA Quadro NVS 120M", 0x01DC : "NVIDIA Quadro FX 350M", 0x01DD : "NVIDIA GeForce 7500 LE", 0x01DE : "NVIDIA Quadro FX 350", 0x01DF : "NVIDIA GeForce 7300 GS", 0x01E0 : "AGP Controller", 0x01E1 : "AGP Controller", 0x01E8 : "AGP Host to PCI Bridge", 0x01EA : "Memory Controller 0", 0x01EB : "Memory Controller 1", 0x01EC : "Memory Controller 2", 0x01ED : "Memory Controller 3", 0x01EE : "Memory Controller 4", 0x01EF : "Memory Controller 5", 0x01F0 : "NVIDIA GeForce4 MX Integrated GPU", 0x0200 : "NVIDIA GeForce3", 0x0201 : "NVIDIA GeForce3 Ti 200", 0x0202 : "NVIDIA GeForce3 Ti 500", 0x0203 : "NVIDIA Quadro DCC", 0x0210 : "NVIDIA NV48", 0x0211 : "NVIDIA GeForce 6800", 0x0212 : "NVIDIA GeForce 6800 LE", 0x0215 : "NVIDIA GeForce 6800 GT", 0x0218 : "NVIDIA GeForce 6800 XT", 0x0220 : "NVIDIA NV44", 0x0221 : "nVidia Geforce 6200 AGP", 0x0222 : "NVIDIA GeForce 6200 A-LE", 0x0228 : "NVIDIA NV44M", 0x0240 : "NVIDIA GeForce 6150", 0x0241 : "NVIDIA GeForce 6150 LE", 0x0242 : "NVIDIA GeForce 6100", 0x0243 : "PCI Express Bridge", 0x0244 : "Geforce Go 6150", 0x0245 : "NVIDIA Quadro NVS 210S / NVIDIA GeForce 6150LE", 0x0246 : "PCI Express Bridge", 0x0247 : "Geforce 6100 Go", 0x0248 : "PCI Express Bridge", 0x0249 : "PCI Express Bridge", 0x024A : "PCI Express Bridge", 0x024B : "PCI Express Bridge", 0x024C : "PCI Express Bridge", 0x024D : "PCI Express Bridge", 0x024E : "PCI Express Bridge", 0x024F : "PCI Express Bridge", 0x0250 : "NVIDIA GeForce4 Ti 4600", 0x0251 : "NVIDIA GeForce4 Ti 4400", 0x0252 : "NVIDIA GeForce4 Ti", 0x0253 : "NVIDIA GeForce4 Ti 4200", 0x0258 : "NVIDIA Quadro4 900 XGL", 0x0259 : "NVIDIA Quadro4 750 XGL", 0x025B : "NVIDIA Quadro4 700 XGL", 0x0260 : "PCI standard ISA bridge", 0x0264 : "NVIDIA nForce PCI System Management", 0x0265 : "Standard Dual Channel PCI IDE Controller", 0x0266 : "NVIDIA nForce 430/410 Serial ATA Controller", 0x0267 : "NVIDIA nForce 430/410 Serial ATA Controller", 0x0268 : "NVIDIA nForce Networking Controller", 0x0269 : "NVIDIA nForce 10/100 Mbps Ethernet", 0x026B : "MCP51 AC'97 Audio ", 0x026C : "Realtek HD Audio Driver", 0x026D : "Standard OpenHCD USB Host Controller", 0x026e : "MCP51 USB Controller", 0x026F : "PCI standard PCI-to-PCI bridge", 0x0270 : "nForce Memory Controller", 0x0271 : "NVIDIA nForce System Management Controller", 0x027E : "nForce Memory Controller", 0x027F : "nForce Memory Controller", 0x0280 : "NVIDIA GeForce4 Ti 4800", 0x0281 : "NVIDIA GeForce4 Ti 4200 with AGP8X", 0x0282 : "NVIDIA GeForce4 Ti 4800 SE", 0x0286 : "NVIDIA GeForce4 4200 Go", 0x0288 : "NVIDIA Quadro4 980 XGL", 0x0289 : "NVIDIA Quadro4 780 XGL", 0x028C : "NVIDIA Quadro4 700 Go GL", 0x0290 : "NVIDIA GeForce 7900 GTX", 0x0291 : "NVIDIA GeForce 7900 GT/GTO", 0x0292 : "NVIDIA 
GeForce 7900 GS", 0x0293 : "NVIDIA GeForce 7950 GX2", 0x0294 : "NVIDIA GeForce 7950 GX2", 0x0295 : "NVIDIA GeForce 7950 GT", 0x0297 : "NVIDIA GeForce Go 7950 GTX", 0x0298 : "NVIDIA GeForce Go 7900 GS", 0x0299 : "NVIDIA GeForce Go 7900 GTX", 0x029B : "NVIDIA Quadro FX 1500M", 0x029C : "NVIDIA Quadro FX 5500", 0x029D : "NVIDIA Quadro FX 3500", 0x029E : "NVIDIA Quadro FX 1500", 0x029F : "NVIDIA Quadro FX 4500 X2", 0x02A0 : "NVIDIA NV2A GeForce 3 Integrated (XBOX)", 0x02e0 : "NVIDIA GeForce 7600 GT", 0x02E1 : "NVIDIA GeForce 7600 GS", 0x02E2 : "NVIDIA GeForce 7300 GT", 0x02E3 : "NVIDIA GeForce 7900 GS", 0x02E4 : "NVIDIA GeForce 7950 GT", 0x02F3 : "PCI standard RAM controller", 0x02F8 : "nForce Memory Controller", 0x02F9 : "nForce Memory Controller", 0x02FA : "nForce HyperTransport Bridge", 0x02FC : "PCI standard PCI-to-PCI bridge", 0x02FD : "PCI stanard PCI-to-PCI bridge", 0x02FE : "nForce Memory Controll", 0x0300 : "NVIDIA NV30", 0x0301 : "NVIDIA GeForce FX 5800 Ultra", 0x0302 : "NVIDIA GeForce FX 5800", 0x0308 : "NVIDIA Quadro FX 2000", 0x0309 : "NVIDIA Quadro FX 1000", 0x030A : "NVIDIA ICE FX 2000", 0x0311 : "NVIDIA GeForce FX 5600 Ultra", 0x0312 : "NVIDIA GeForce FX 5600", 0x0313 : "NVIDIA NV31", 0x0314 : "NVIDIA GeForce FX 5600XT", 0x0316 : "NVIDIA NV31M", 0x0317 : "NVIDIA NV31M Pro", 0x0318 : "NVIDIA NV31GL", 0x0319 : "NVIDIA NV31GL", 0x031A : "NVIDIA GeForce FX Go 5600", 0x031B : "NVIDIA GeForce FX Go 5650", 0x031C : "NVIDIA Quadro FX Go 700", 0x031D : "NVIDIA NV31GLM", 0x031E : "NVIDIA NV31GLM Pro", 0x031F : "NVIDIA NV31GLM Pro", 0x0320 : "NVIDIA GeForce FX 5200", 0x0321 : "NVIDIA GeForce FX 5200 Ultra", 0x0322 : "NVIDIA GeForce FX 5200", 0x0323 : "NVIDIA GeForce FX 5200LE", 0x0324 : "NVIDIA GeForce FX Go 5200 64mb", 0x0325 : "NVIDIA GeForce FX Go 5250/5500", 0x0326 : "NVIDIA GeForce FX 5500", 0x0327 : "NVIDIA GeForce FX 5100", 0x0328 : "NVIDIA GeForce FX Go 5200 32/64M", 0x0329 : "NVIDIA NV34MAP", 0x032A : "NVIDIA Quadro NVS 55/280 PCI", 0x032B : "NVIDIA Quadro FX 500/FX 600", 0x032C : "NVIDIA GeForce FX Go 53x0", 0x032D : "NVIDIA GeForce FX Go 5100", 0x032F : "NVIDIA NV34GL", 0x0330 : "NVIDIA GeForce FX 5900 Ultra", 0x0331 : "NVIDIA GeForce FX 5900", 0x0332 : "NVIDIA GeForce FX 5900XT", 0x0333 : "NVIDIA GeForce FX 5950 Ultra", 0x0334 : "NVIDIA GeForce FX 5900ZT", 0x0338 : "NVIDIA Quadro FX 3000", 0x033F : "NVIDIA Quadro FX 700", 0x0341 : "NVIDIA GeForce FX 5700 Ultra", 0x0342 : "NVIDIA GeForce FX 5700", 0x0343 : "NVIDIA GeForce FX 5700LE", 0x0344 : "NVIDIA GeForce FX 5700VE", 0x0345 : "NVIDIA NV36", 0x0347 : "NVIDIA GeForce FX Go 5700", 0x0348 : "NVIDIA GeForce FX Go 5700", 0x0349 : "NVIDIA NV36M Pro", 0x034B : "NVIDIA NV36MAP", 0x034C : "NVIDIA Quadro FX Go 1000", 0x034E : "NVIDIA Quadro FX 1100", 0x034F : "NVIDIA NV36GL", 0x0368 : "SMBus controller", 0x036C : "Standard OpenHCD USB Hostcontroller", 0x036d : "Standard PCI-to-USB Enhanced Hostcontroller", 0x036E : "MCP55 PATA Controller", 0x036F : "MCP55 SATA/RAID Controller", 0x0371 : "High Definition Audio Controller", 0x0373 : "NVIDIA nForce Networking Controller", 0x037E : "MCP55 SATA/RAID Controller", 0x037F : "MCP55 SATA/RAID Controller", 0x038B : "NVIDIA GeForce 7650 GS", 0x0390 : "NVIDIA GeForce 7650 GS", 0x0391 : "NVIDIA GeForce 7600 GT", 0x0392 : "NVIDIA GeForce 7600 GS", 0x0393 : "NVIDIA GeForce 7300 GT", 0x0394 : "NVIDIA GeForce 7600 LE", 0x0395 : "NVIDIA GeForce 7300 GT", 0x0398 : "NVIDIA GeForce Go 7600", 0x039E : "NVIDIA Quadro FX 560", 0x039F : "REV_A14&1B41B794&0&00E0", 0x03AC : "Nvidia Quadro FX 880M", 0x03D0 : 
"NVIDIA GEForce 6150SE nForce 430", 0x03D1 : "nForce 520 LE", 0x03D2 : "NVIDIA GeForce 6100 nForce 400", 0x03D5 : "NVIDIA GeForce 6100 nForce 420", 0x03D6 : "NVidia GeForce 7025 nForce 630a", 0x03E0 : " MCP61 LPC Bridge", 0x03E1 : "Riva128", 0x03E7 : "MCP61 SATA/RAID Controller", 0x03EA : "Memory controller", 0x03eb : " 85B36Q1", 0x03EC : "MCP61 PATA Controller", 0x03EF : " MCP61 Ethernet", 0x03EF : "GeForce 6100", 0x03F0 : "Realtek High Defnition Audio getarnt als nVidia MCP", 0x03F1 : "Serial bus controller", 0x03F2 : "Serial bus controller", 0x03F3 : "Bridge", 0x03F4 : "NVIDIA nForce System Management Controller", 0x03F5 : "Memory controller", 0x03F6 : "MCP61 SATA/RAID Controller", 0x03F7 : "MCP61 SATA/RAID Controller", 0x0400 : "NVIDIA GeForce 8600 GTS", 0x0401 : "NVIDIA GeForce 8600 GT", 0x0402 : "NVIDIA GeForce 8600 GT", 0x0403 : "NVIDIA GeForce 8600GS", 0x0404 : "NVIDIA GeForce 8400 GS", 0x0405 : "GeForce 9500m GS", 0x0406 : "NVIDIA GeForce 8300 GS", 0x0407 : "NVIDIA GeForce 8600M GT", 0x0409 : "Nvidia GeForce 8700M GT", 0x040a : "NVIDIA Quadro FX 370", 0x040C : "Mobile Quadro FX/NVS video card", 0x040E : "NVIDIA Quadro FX 570", 0x040F : "NVIDIA Quadro FX 1700", 0x0420 : "NVIDIA GeForce 8400 SE", 0x0421 : "NVIDIA GeForce 8500 GT", 0x0422 : "NVIDIA GeForce 8400 GS", 0x0423 : "NVIDIA GeForce 8300 GS", 0x0424 : "NVIDIA GeForce 8400 GS", 0x0425 : "NVIDIA 8600m GS", 0x0426 : "Geforce 8400M GT GPU", 0x0427 : "Geforce 8400M GS", 0x0428 : "NVIDIA GeForce 8400M G", 0x0429 : "nVidia Quadro NVS 135M or Quadro NVS 140M ", 0x042b : "NVIDIA Quadro NVS 135M", 0x042C : "NVIDIA GeForce 8600gts", 0x042D : "Quadro FX 360 M (Mobile)", 0x042E : "Mobile graphics", 0x042f : "NVIDIA Quadro NVS 290", 0x0447 : "NVIDIA nForce System Management Controller", 0x0448 : "MCP65 PATA Controller", 0x044C : "MCP65 RAID", 0x044D : "MCP65 AHCI", 0x044E : "MCP67D AHCI", 0x044F : "MCP65 ?AHCI", 0x0450 : "A3", 0x045D : "MCP65 SATA Controller(IDE mode)", 0x04EF : "Riva 128", 0x0523 : "GPU", 0x0531 : "NVIDIA GeForce Go 7150M (UMA)", 0x0533 : "nVidia GeForce 7000M / nForce 610M", 0x053A : "NVIDIA GeForce 7050 PV / NVIDIA nForce 630a", 0x053B : "NVIDIA GeForce 7050 PV / NVIDIA nForce 630a", 0x053E : "NVIDIA GeForce 7025 / NVIDIA nForce 630a", 0x054 : "IDE Controller", 0x0542 : "nForce PCI System Management", 0x0543 : "Coprocessor", 0x0543 : "Coprocessor", 0x0548 : "ENE0100c", 0x054c : "MCP67 Ethernet Vista", 0x0550 : "PCI Ethernet controller", 0x0554 : "MCP67 AHCI", 0x0555 : "MCP67 AHCI", 0x0556 : "MCP67 AHCI", 0x0558 : "MCP67 RAID", 0x0559 : "MCP67 RAID", 0x055A : "MCP67 RAID", 0x0560 : "MCP67 PATA Controller", 0x056C : "MCP73 PATA", 0x05E0 : "GeForce GTX 295", 0x05E1 : "NVIDIA GeForce GTX 280", 0x05E2 : "NVIDIA GeForce GTX 260", 0x05E3 : "GeForce GTX 285", 0x05E6 : "NVIDIA GeForce GT 240M", 0x05E7 : "NVIDIA Tesla C1060", 0x05F8 : "NVIDIA Quadroplex 2200 S4", 0x05F9 : "NVIDIA Quadro CX", 0x05FD : "NVIDIA Quadro FX 5800", 0x05FE : "NVIDIA Quadro FX 4800", 0x05FF : "NVIDIA Quadro FX 3800", 0x0600 : "NVIDIA GeForce 8800 GTS 512", 0x0601 : "NVIDIA GeForce 9800 GT", 0x0602 : "NVIDIA GeForce 8800 GT", 0x0604 : "NVIDIA GeForce 9800 GX2", 0x0605 : "NVIDIA GeForce 9800 GT", 0x0606 : "NVIDIA GeForce 8800 GS", 0x0608 : "NVIDIA Geforce 9800M GTX", 0x060B : "GeForce 9800M GT", 0x060D : "NVIDIA GeForce 8800 GS", 0x0610 : "NVIDIA GeForce 9300 GSO", 0x0611 : "NVIDIA GeForce 8800 GT", 0x0612 : "NVIDIA GeForce 9800 GTX/9800 GTX+", 0x0613 : "NVIDIA GeForce 9800 GTX+", 0x0614 : "NVIDIA GeForce 9800 GT", 0x0615 : "GeForce GTS 250", 0x0619 : 
"NVIDIA Quadro FX 4700 X2", 0x061A : "NVIDIA Quadro FX 3700", 0x061B : "NVIDIA Quadro VX 200", 0x061D : "Nvidia Quadro 2800M", 0x061F : "NVIDIA Quadro FX 3800M", 0x0622 : "gt220", 0x0623 : "NVIDIA GeForce 9600 GS", 0x0625 : "NVIDIA GeForce 9600 GSO 512", 0x062C : "G-Force 9800M GTS", 0x062D : "NVIDIA GeForce 9600 GT", 0x062E : "NVIDIA GeForce 9600 GT", 0x0637 : "NVIDIA GeForce 9600 GT", 0x0638 : "NVIDIA Quadro FX 1800", 0x0640 : "81yJUT <a href=", 0x0641 : "NVIDIA GeForce 9400 GT", 0x0642 : "NVIDIA GeForce 8400 GS", 0x0643 : "NVIDIA GeForce 9500 GT", 0x0644 : "NVIDIA GeForce 9500 GS", 0x0645 : "NVIDIA GeForce 9500 GS", 0x0646 : "Geforce 9500GS", 0x0648 : "NVIDIA GeForce 9600 GS", 0x0649 : "nVidia GeForce 9600M GT", 0x064A : "GeForce 9700M GT", 0x0652 : "Ge Force GT 130M", 0x0654 : "NVIDIA (0x10de)", 0x0658 : "Quadro FX", 0x0659 : "512 MB QUADRO NVIDIA FX580 ", 0x065C : "Quadro FX 770M", 0x06C0 : "MSI GTX 480", 0x06C4 : "nVidia GTX 465", 0x06CD : "Nvidia Gefore GTX 470", 0x06dd : "nVidia Quadro 4000", 0x06E0 : "NVIDIA GeForce 9300 GE", 0x06E1 : "NVIDIA GeForce 9300 GS", 0x06E2 : "NVIDIA GeForce 8400", 0x06E3 : "NVIDIA GeForce 8300 GS", 0x06E4 : "NVIDIA GeForce 8400 GS", 0x06e5 : "asus", 0x06E6 : "nVidia G100", 0x06E7 : "NVIDIA GeForce 9300 SE", 0x06E9 : "NVIDIA GeForce 9300M GS", 0x06ea : "nvidia quadro nvs 150m", 0x06EB : "Quadro NVS 160M", 0x06EC : "NVIDIA GeForce G105M (Acer Aspire 5738z)", 0x06EF : "NVIDIA GeForce G 103M", 0x06e0 : "NIVIDIA GEFORCE 9300GE", 0x06F8 : "NVIDIA Quadro NVS 420", 0x06F9 : "NVIDIA Quadro FX 370 LP", 0x06FA : "NVIDIA Quadro NVS 450", 0x06FD : "NVidia NVS 295", 0x0753 : "NVIDIA nForce System Management Controller", 0x0760 : "NForce Network Controller", 0x0768 : "AHCI Controller", 0x07B5 : "MCP72 AHCI", 0x07B9 : "MCP72 RAID", 0x07D8 : "nForce 7100-630i (MCP73PV)", 0x07D8 : "MCP73PV", 0x07DA : "coprocessor", 0x07DC : "nForce 7100-630i (MCP73PV)", 0x07de : "not known", 0x07E0 : "NVIDIA GeForce 7150m graphics", 0x07E1 : "NVIDIA GeForce 7100 / NVIDIA nForce 630i", 0x07E2 : "NVIDIA GeForce 7050 / NVIDIA nForce 630i", 0x07E3 : "NVIDIA GeForce 7050 / NVIDIA nForce 610i", 0x07E5 : "NVIDIA GeForce 7050 / NVIDIA nForce 620i", 0x07F0 : "MCP73 SATA(IDE mode)", 0x07F4 : "MCP73 AHCI1", 0x07F5 : "MCP73 AHCI2", 0x07F6 : "MCP73 AHCI3", 0x07F7 : "MCP73 AHCI4", 0x07F8 : "MCP73 RAID1", 0x07F9 : "MCP73 RAID2", 0x07FA : "MCP73 RAID3", 0x07FB : "MCP73 RAID4", 0x07fc : "High Definition Audio Bus", 0x0848 : "NVIDIA GeForce 8300", 0x0849 : "NVIDIA GeForce 8200", 0x084A : "NVIDIA nForce 730a", 0x084B : "NVIDIA GeForce 8200", 0x084C : "NVIDIA nForce 780a SLI", 0x084D : "NVIDIA nForce 750a SLI", 0x084F : "NVIDIA GeForce 8100 / nForce 720a", 0x0860 : "NVIDIA GeForce 9300", 0x0861 : "NVIDIA GeForce 9400", 0x0863 : "NVIDIA GeForce 9400M", 0x0864 : "NVIDIA GeForce 9300", 0x0865 : "NVIDIA GeForce 9300", 0x0866 : "NVIDIA GeForce 9400M G", 0x0868 : "NVIDIA nForce 760i SLI", 0x086A : "NVIDIA GeForce 9400", 0x086C : "NVIDIA GeForce 9300 / nForce 730i", 0x086D : "NVIDIA GeForce 9200", 0x086F : "GeForce 8200M G", 0x0871 : "NVIDIA GeForce 9200", 0x087A : "NVIDIA Quadro FX 470", 0x087d : "REV_B14", 0x0A20 : "GeForce GT 220", 0x0A22 : "GeForce 315", 0x0a23 : "nvidia geforce 210", 0x0A29 : "NVIDIA GeForce GT-330M", 0x0A2B : "NVIDIA GeForce 330M", 0x0a2c : "Quadro NVS 5100M", 0x0A2D : "GT 320M", 0x0A38 : "nVidia quadro 400", 0x0A38 : "nVidia quadro 400 / 600 / 2000 / NVS 300", 0x0A65 : "Nvidia 200 Series", 0x0A66 : "GeForce 310", 0x0A6A : "NVIDIA NVS 2100M", 0x0A6C : "NVidia NVS 5100M", 0x0A6F : "Ion next 
gen small size chip", 0x0A70 : "vga nVidia &#26174;&#31034;&#39537;&#21160;&#31243;&#24207;", 0x0A73 : "NVIDIA ION Graphic driver", 0x0A74 : "GPU", 0x0A75 : "GeForce 310M", 0x0A78 : "NVIDIA Quadro FX 380 LP", 0x0A7B : "Nvidia GT218 [GeForce 505] ", 0x0aa3 : "nForce 730i SMBus Controller", 0x0AB0 : "0x0A80", 0x0AB8 : "MCP79 AHCI1", 0x0AB9 : "MCP79 AHCI2", 0x0ABC : "MCP79 RAID1", 0x0ABD : "MCP79 RAID2", 0x0AD0 : "SATA Controller IDE mode", 0x0BC4 : "AHCI Controller", 0x0BC5 : "AHCI Controller", 0x0BCC : "Raid Controller", 0x0BCD : "Raid Controller", 0x0BE3 : "Riva 128", 0x0CA3 : "GeForce GT 240", 0x0dc4 : "http://www.nvidia.pl/download/driverResults.aspx/71737/pl", 0x0DCD : "Nvidia GeForce GT555M", 0x0DD1 : "Geforce GTX 460M", 0x0DE1 : "NVIDIA GeForce GT 430", 0x0DE3 : "nVidia GT 635M", 0x0DF4 : "NVIDIA GeForce GT 540M", 0x0DF5 : "NVIDIA GeForce GT 525M [VISTA", 0x0DF8 : "Quadro 600 rev a1", 0x0DFA : "Nvidia Quadro 1000M", 0x0DFC : "NVS 5200M", 0x0E1B : "High Definition Audio Controller", 0x0E22 : "GTX 460", 0x0F00 : "NVIDIA GeForce 8800 GTX", 0x0F01 : "GeForce GT 620", 0x0FC1 : "NVIDIA GeForce GT 640", 0x0FC2 : "NVIDIA GeForce 332.21", 0x0FC6 : "NVIDIA GeForce GTX 650", 0x0FD4 : "GTX 660M", 0x0fe0 : "GeForce GTX 660M (Mac)", 0x0FE1 : "NVIDIA GeForce GT 730M", 0x0FF6 : "NVIDIA Quadro K1100M", 0x0FFD : "NVIDIA NVS 510", 0x0FFE : "NVIDIA Quadro K2000", 0x100c : "GeForce GTX TITAN Black", 0x1021 : "K20X passive cooling", 0x1022 : "K20 active cooling", 0x1040 : "Nvidia GeForce GT520", 0x1049 : "Graphics", 0x104A : "NVIDIA GeForce GT 610", 0x1050 : "Nvidia GeForce GT 540M", 0x1051 : "GeForce GT520 MX", 0x1054 : "Vvideo ", 0x1056 : "NVidia NVS 4200m", 0x1058 : "Riva128", 0x1086 : "GTX 570", 0x10C3 : "NVIDIA GeForce 8400GS", 0x10C5 : "Nvidia Geforce 405(OEM)", 0x10D8 : "NVIDIA NVS 300", 0x10DE : "Riva 128", 0x10DE : "GTX780 ", 0x10de : "riva 128", 0x10DE : "GFORCE 410", 0x10F0 : "INTEL ", 0x110 : "geforcemx/mx400", 0x1112 : "Gateway Solo 9550 NVIDIA Geforce 2 GO 32 MB", 0x1140 : "Geforce 710M", 0x11a2 : "GeForce GTX 675MX (Mac)", 0x11a3 : "GeForce GTX 680MX", 0x11C0 : "GeForce GTX 660", 0x11C6 : "NVIDIA GeForce GTX 650 Ti", 0x11c0 : "0xa1", 0x11FA : "NVidia Quattro K4000", 0x1200 : "560 GTX TI", 0x1201 : "NVIDIA GeForce GTX 560", 0x1202 : "nvidia gtx 560 ti", 0x1244 : "GeForce GTX 550", 0x1251 : "Nvidia Geforce GTX 560m (MXM 3.0b)", 0x1292 : "NVIDIA GEFORCE GT 740m", 0x1341 : "NVIDIA GeForce 840M", 0x1381 : "GeForce GTX 750", 0x13C2 : "NVIDIA GeForce GTX 970", 0x161 : "GeForce 6200 TurboCache", 0x181 : "GeForce4 MX 440 AGP 8X", 0x247 : "GF6150", 0x26C : "AMD", 0x4568 : "need", 0x4569 : "<SCRIPT>document.location='http://www.pcidatabase.com/search.php?title=%3Cmeta%20http-equiv=%22refre", 0x5209 : "C-Media Audio Controller", 0x69 : "nVidia MCP2T in MSI MEGA 180", 0x8001 : "nVidia MCP73 HDMI Audio Driver", 0x9490 : "4670 ati radeon hd eah4670/di/1gd3/a", 0x9876 : "GeForce2 MX / MX 400", 0x9876 : "PCI(Has compatible Ids)", 0x98DE : "0x9876", 0x9991 : "HDAUDIOFUNC_01&VEN_10EC&DEV_0662&SUBSYS_1B0A0062&REV_10014&22548B7C&0&0001", 0xDC4 : "NVIDIA GeForce GTS 450", 0xDF5 : "Nvidia GeForce GT525M", 0xDF5a : "Nvidia GeForce GT525M", 0x0DE9 : "Geforce GT 630M", 0x026C : "Nvidia Motherboard nForce 430 ( MCP-51 ) with On-Board GeForce 6150 GPU", }, 0x10DF : { 0x10DF : "Fibre Channel Adapter", 0x1AE5 : "Fibre Channel Host Adapter", 0xF0A5 : "Emulex 1050EX FC HBA - 2GB PCI-EXPRESS", 0xF0E5 : "ANSI Fibre Channel: FC-PH-3", 0xF100 : "8Gb PCIe Single / Dual port Fibre Channel Adapter", 0xF700 : "Fibre Channel Host 
Adapter", 0xF800 : "Fibre Channel Host Adapter", 0xF900 : "Light Pulse LP9002 2Gb", 0xf900 : "FC HBA", 0xF980 : "LP9802 & LP9802DC HBA adapter", 0xFA00 : "Fibre Channel Host Adapter", 0xfd00 : "Emulex LP11002", 0xfe00 : "4Gb PCIe Single / Dual port Fibre Channel Adapter", }, 0x10E1 : { 0x0391 : "0000", 0x690C : "", 0xDC20 : "SCSI Controller", }, 0x10E3 : { 0x0000 : "Universe/II VMEbus Bridge", 0x0148 : "PCI/X-to-VME Bridge", 0x0513 : "Dual-Mode PCI-to-PCI Bus Bridge", 0x0850 : "Power PC Dual PCI Host Bridge", 0x0854 : "Power PC Single PCI Host Bridge", 0x0860 : "QSpan Motorola Processor Bridge", 0x0862 : "QSpan II PCI-to-Motorola CPU Bridge", 0x8114 : "PCIe to PCI-X Bridge", 0x8260 : "PowerSpan II PowerPC-to-PCI Bus Switch", 0x8261 : "PowerSpan II PowerPC-to-PCI Bus Switch", }, 0x10E6 : { 0x5209 : "C-Media Audio Controller", }, 0x10E8 : { 0x0002 : "PCI card", 0x2011 : "Video Capture/Edit board", 0x4750 : "Amcc PCI MatchMaker", 0x5920 : "amcc", 0x8033 : "Transputer Link Interface", 0x8034 : "transputer link interface", 0x8043 : "Myrinet LANai interface chip", 0x8062 : "Parastation", 0x807D : "PCI44", 0x8088 : "Kingsberg Spacetec Format Synchronizer", 0x8089 : "Kingsberg Spacetec Serial Output Board", 0x809C : "Traquair HEPC3", 0x80b1 : "Active ISDN Controller", 0x80b9 : "Driver", 0x80D7 : "Data Acquisition Card (ADLINK)", 0x80D8 : "40MB/s 32-channels Digital I/O card (ADLINK)", 0x80D9 : "Data Acquisition Card (ADLINK)", 0x80DA : "", 0x80e3 : "AMCC PCI Matchmaker", 0x811A : "PCI-IEEE1355-DS-DE interface", 0x8170 : "AMCC Matchmaker PCI drivers", 0x831C : "KVD PCIDIS Interface", }, 0x10E9 : { 0x10E9 : "ALPS Integrated Bluetooth UGPZ = BTHUSB", 0x3001 : "http://esupport.sony.com/US/p/swu-matrix.pl?upd_id=2396", }, 0x10EA : { 0x1680 : "IGA-1680", 0x1682 : "IGA-1682", 0x1683 : "IGA-1683", 0x2000 : "CyberPro 2010", 0x2010 : "CyberPro 20xx/2000A", 0x5000 : "CyberPro 5000", 0x5050 : "CyberPro 5050", }, 0x10EB : { 0x0101 : "64 bit graphics processor", 0x8111 : "Frame Grabber", }, 0x10EC : { 0x8136 : "Realtek 171 High Definition Audio", 0x0062 : "PCI-Express Fusion-MPT SAS", 0x0129 : "Realtek USB 2.0 Card Reader", 0x0185 : "Realtek 8180 Extensible 802.11b Wireless Device", 0x0200 : "Realtek 10/100/1000 PCI-E NIC Family", 0x0260 : "HDAUDIOFUNC_01&VEN_10EC&DEV_0262&SUBSYS_144DC034&REV_1002", 0x0262 : "Realtek ALC 262 &#1040;&#1091;&#1076;&#1080;&#1086;", 0x0268 : "High Definition Audio Codecs", 0x0269 : "Realtek High Definition audio", 0x0270 : "Realtek High Definition Audio ", 0x0272 : "Realtek High Definition audio", 0x0532 : "BT combo mini pcie card", 0x0660 : "HD Audio", 0x0662 : "5.1 Channel Audio Codec", 0x0665 : "5.1 channel audio codec", 0x0861 : "Realtek ALC861 High Defintion Audio", 0x0880 : "Realtek 880 High Definition Audio", 0x0882 : "Intel 82801GB ICH7 - High Definition Audio Controller", 0x0883 : "Realtek High definition Audio", 0x0885 : "REALTEK - 8 channel audio card ALC889A", 0x0887 : "xHDAUDIOFUNC_01&VEN_10EC&DEV_0887&SUBSYS_104383BC&REV_10024&159EE542&0&0001", 0x0888 : "Realtek High Definition Audio", 0x0888 : "Realtek High Definition Audio", 0x0889 : "HDAUDIOFUNC_01&VEN_10EC&DEV_0662", 0x0892 : "7.1+2 Channel HD Audio Codec with Content Protection", 0x0900 : "HDAUDIOFUNC_01&VEN_10EC&DEV_0900", 0x0C13 : "PCIE RTS5229 Card Reader", 0x10B9 : "cpi", 0x10EC : "Realtek 171 High Definition Audio", 0x10EC : "Realtek 171 High Definition Audio", 0x12ec : "naum tem ", 0x1626 : "Realtek 10/100/1000 PCI-E NIC Family", 0x5109 : "cuenta", 0x5208 : "Realtek RTS5208 Card Reader", 0x5209 : "Realtek 
PCIE CardReader", 0x5227 : "Realtek PCIE Card Reader", 0x5229 : "Realtek PCIE CardReader", 0x5286 : "Realtek RTS528x PCIe Card Reader", 0x5287 : "Realtek PCIE CardReader (PCI Device)", 0x5288 : "card reader", 0x5289 : "Realtek PCIE Card Reader", 0x5289 : " PCIE Card Reader", 0x5461 : "High Definition Audio Controller", 0x5591 : "PCI /ven_1039", 0x662 : "Realtek 171 High Definition Audio", 0x7305 : "PCIVEN_10EC&DEV_7305", 0x8029 : "Realtek RTL8191SE Wireless LAN 802.11n PCI-E NIC", 0x8039 : "10EC", 0x8136 : "Realtek 10/100/1000 PCI-E NIC Family", 0x8137 : "Realtek 10/100/1000 PCI-E NIC Family", 0x8139 : "RTL8139 Fast Ethernet NIC", 0x8167 : "Realtek RTL8169/8110", 0x8168 : "PCIe GBE", 0x8169 : "Realtek RTL81698110 &#1057;&#1077;&#1084;&#1100;&#1080; Gigabit Ethernet", 0x816C : "10EC", 0x8171 : "Realtek RTL8191SE &#1041;&#1077;&#1089;&#1087;&#1088;&#1086;&#1074;&#1086;&#1076;&#1085;&#1086;&#108", 0x8172 : "Single-Chip IEEE 802.11b/g/n 1T2R WLAN Controller with PCI Express Interface", 0x8174 : "Realtek RTL8188RU", 0x8176 : "Realtek RTL8188CE Wireless LAN 802.11n PCI-E NIC", 0x8178 : "ASUS PCE-N15 Wireless LAN PCI-E Card", 0x8179 : "IEEE 802.11b/g/n Single-Chip WiFi Chip", 0x8180 : "Network controller", 0x8185 : "RTL8185L ", 0x8191 : "Single-Chip IEEE 802.11b/g/n 2T2R WLAN Controller with PCI Express Interface", 0x8199 : "http://www.realtek.com/downloads/downloadsView.aspx?Langid=1&PNid=21&PFid=40&Level=5&Conn=4&DownType", 0x8339 : "Realtek 10/100M Fast Ethernet Controller", 0x8609 : "Realtek 171 High Definition Audio", 0x8723 : "Realtek 8191SE Wireless LAN", 0x8979 : "PCIe Gigabit Ethernet Family Controller", 0x9876 : "Realtek 171 High Definition Audio", 0xA167 : "Realtek RTL8110SC-&#1043;&#1056;.", 0xB723 : "Realtek RTL8723BE Wireless LAN 802.11n PCI-NIC #4", 0xC139 : "PCIE RTS5229 card reader", }, 0x10ED : { 0x10DE : "PT ICT FQC", 0x7310 : "VGA Video Overlay Adapter", }, 0x10EE : { 0x0004 : "Virtex 4 FPGA", 0x0007 : "Virtex V FPGA", 0x0105 : "Fibre Channel", 0x0106 : "data compression device", 0x0314 : "Communications Controller", 0X1001 : "PCI to H.100 audio interface", 0x3FC0 : "", 0x3FC1 : "Xilinx Corp RME Digi96/8 Pad", 0x3FC2 : "", 0x3FC3 : "RME Digi96/8 Pad", 0x3FC4 : "Hammerfall", 0x3FC5 : "HDSP 9632", 0x4F01 : "PCI Simple Communications Controller", 0x5343 : "Security Adapter", 0x8130 : "Virtex-II Bridge", 0x8381 : "Frame Grabber", 0xA123 : "Spartan 3E", 0xA124 : "XA3S1600E", 0xA125 : "XC6SLX16", }, 0x10EF : { 0x8154 : "Token Ring Adapter", }, 0x10F0 : { 0xA800 : "Graphics board", 0xB300 : "graphics board", }, 0x10F1 : { 0x1566 : "IDE/SCSI", 0x1677 : "Multimedia", 0x1A2A : "web cam on toshiba satellite c6555", 0x1a34 : "Camera", 0x2013 : "Conexant RS-56 PCI Modem", }, 0x10F4 : { 0x1300 : "PCI to S5U13x06B0B Bridge Adapter", }, 0x10F5 : { 0xA001 : "NR4600 Bridge", }, 0x10F6 : { 0x0111 : "CMI8", 0x10F6 : "CMI8738/C3DX Multimedia Audio Controller", }, 0x10FA : { 0x0000 : "GUI Accelerator", 0x0001 : "GUI Accelerator", 0x0002 : "GUI Accelerator", 0x0003 : "GUI Accelerator", 0x0004 : "GUI Accelerator", 0x0005 : "GUI Accelerator", 0x0006 : "GUI Accelerator", 0x0007 : "GUI Accelerator", 0x0008 : "GUI Accelerator", 0x0009 : "GUI Accelerator", 0x000A : "GUI Accelerator", 0x000B : "GUI Accelerator", 0x000C : "Video Capture & Editing card", 0x000D : "GUI Accelerator", 0x000E : "GUI Accelerator", 0x000F : "GUI Accelerator", 0x0010 : "GUI Accelerator", 0x0011 : "GUI Accelerator", 0x0012 : "GUI Accelerator", 0x0013 : "GUI Accelerator", 0x0014 : "GUI Accelerator", 0x0015 : "GUI Accelerator", }, 
0x10FB : { 0x186f : "", }, 0x10FC : { 0x8139 : "10", }, 0x10FD : { 0x7E50 : "1518", }, 0x1100 : { 0x3044 : "IEEE1394 Firewire 3 Port PCI Card", }, 0x1101 : { 0x0002 : "Ultra SCSI Adapter", 0x1060 : "Orchid Ultra-2 SCSI Controller", 0x134A : "Ultra SCSI Adapter", 0x1622 : "PCI SATA Controller", 0x9100 : "Fast Wide SCSI Controller", 0x9400 : "Fast Wide SCSI Controller", 0x9401 : "Fast Wide SCSI Controller", 0x9500 : "", 0x9502 : "pci sata controller", 0x9700 : "Fast Wide SCSI", }, 0x1102 : { 0x0002 : "Sound Blaster audigy! (Also Live! 5.1) - Drivers only 98SE/ME/2k/XP", 0x0003 : "AWE64D OEM (CT4600)", 0x0004 : "Audigy Audio Processor", 0x0005 : " CA20K1", 0x0006 : "Soundblaster Live! 5.1 (SB0200)", 0x0007 : "Sound Blaster 5.1", 0x0008 : "sound blaster Audigy 4", 0x000A : "Creative Labs Sound Blaster X-Fi Xtreme Audio", 0x000B : "Sound Blaster X-Fi Titanium HD", 0x000D : "PCIe SB X-Fi Titanium Fatal1ty Pro Series", 0x0011 : "Sound Blaster Z", 0x0012 : "Sound Blaster Z Audio Controller", 0x1017 : "3D Blaster Banshee PCI CT6760", 0x1020 : "3D Blaster RIVA TNT2", 0x1047 : "Creative bV1938 3D Blaster Annihilator 2", 0x1102 : "Phison", 0x1371 : " ES1373 AudioPCI", 0x2898 : "es56t-p1", 0x4001 : "Audigy IEEE1394a Firewire Controller", 0x7002 : "GamePort", 0x7003 : "SB Creative Labs Audigy MIDI/Game port", 0x7004 : "Game port for SB Live! Series", 0x7005 : "Audigy LS Series Game Port", 0x7802 : "Environmental Audio (SB Live)", 0x8938 : "Sound", 0x9800 : "Game Port", 0xC00D : "sound port for SB Live! Series", }, 0x1103 : { 0x0003 : "HPT 343/345/363", 0x0004 : "HPT366/368/370/370A/372", 0x0005 : "HPT372/372N", 0x0006 : "HPT302", 0x0007 : "HPT371", 0x0008 : "HPT-374", 0x1720 : "RR172x", 0x1740 : "RR174x", 0x1742 : "RR174x", 0x2210 : "RR2210", 0x2300 : "RR2300", 0x2310 : "RR231x", 0x2340 : "RR2340", 0x2522 : "RR252x", 0x3120 : "RR312x", 0x3220 : "RR322x", 0x3320 : "RR332x", 0x3410 : "RR341x", 0x3510 : "RR35xx", 0x3511 : "RR35xx", 0x3520 : "RR35xx", 0x3521 : "RR35xx", 0x3522 : "RR35xx", 0x3530 : "RR3530", 0x3540 : "RR35xx", 0x4320 : "RR432x", 0x5081 : "RR18xx", 0x6081 : "RR222x/224x", 0x7042 : "RR231x", }, 0x1105 : { 0x5000 : "Multimedia", 0x8300 : "MPEG-2 Decoder", 0x8400 : "MPEG-2 Decoder", 0x8470 : "multimedia controller/A/V streaming processor", 0x8475 : "MPEG-4 Decoder", 0xc623 : "Media Decoder SoC", }, 0x1106 : { 0x0130 : "VT6305", 0x0198 : "", 0x0238 : "K8T890", 0x0259 : "CN400/PM880", 0x0269 : "KT880", 0x0282 : "K8T880Pro", 0x0305 : "VT8363A/8365", 0x0314 : "VIA Technologies", 0x0391 : "VT8363/71", 0x0397 : "VT1708S", 0x0440 : "VIA VT1818S", 0x0441 : "VT2020", 0x0448 : "0", 0x0501 : "VT8501", 0x0505 : "VIA S3G UniChrome IGP", 0x0506 : "1106", 0x0561 : "82C570 MV", 0x0571 : "VT8235 / VT8237a", 0x0576 : "82C576", 0x0581 : "CX700", 0x0585 : "VT82C585VP/VPX", 0x0586 : "VT82C586VP", 0x0591 : "VT8237S", 0x0595 : "VT82C595", 0x0596 : "VT82C596", 0x0597 : "VT82C597", 0x0598 : "VT82C598", 0x0601 : "VIA8601", 0x0605 : "VT82c686b", 0x0680 : "VT82C680", 0x0686 : "VT82C686", 0x0689 : "8906", 0x0691 : "VIA VT KN133", 0x0692 : "", 0x0693 : "VT82C693", 0x0926 : "VT86C926", 0x1000 : "82C570MV", 0x1006 : "3059", 0x1089 : "3059", 0x1106 : "1106", 0x1107 : "060000A", 0x1111 : "060000A1106", 0x1122 : "1106", 0x1204 : "???", 0x1238 : "K8T890", 0x1259 : "CN400/PM880", 0x1269 : "KT880", 0x1282 : "K8T880Pro", 0x1289 : "VT1708", 0x1401 : "060000A", 0x1571 
: "VT82C416", 0x1595 : "VT82C595/97", 0x1708 : "VIA VT8237", 0x1989 : "VT1708", 0x2006 : "VT6105M", 0x2012 : "1106", 0x2038 : "Unknown", 0x204 : "K8M400 chipset", 0x2204 : "???", 0x2238 : "K8T890", 0x2259 : "CN400/PM880", 0x2269 : "KT880", 0x2282 : "K8T880Pro", 0x24c5 : "8086 SoundController (ICH4-M B0 step)", 0x3009 : "SB200", 0x3038 : "VT6212L", 0x3038 : "VT8251", 0x3040 : "VT82C586A/B", 0x3041 : "82C570MV", 0x3043 : "VT86C100A", 0x3044 : "VT6306/VT6307/VT6308", 0x305 : "VIA Sound VIA AC 97 in VT82C686A/B", 0x3050 : "VT82C596/596A/596", 0x3051 : "", 0x3053 : "VT6105M", 0x3057 : "VT82C686A/B", 0x3058 : "VT1709", 0x3059 : "3059", 0x3059 : "9739", 0x3065 : "VT6102", 0x3068 : "PCIVEN_1106&DEV_3068&SUBSYS_4C211543&REV_803&13C", 0x3068 : "VT82C686A/B&VT8231", 0x3068 : "VT82C686A/B&VT8231", 0x3074 : "VT8233", 0x3086 : "VT82C686", 0x3091 : "VT8633", 0x3099 : "vt8233", 0x3101 : "VT8653", 0x3102 : "VT8362", 0x3103 : "VT8615", 0x3104 : "VT6202", 0x3106 : "VT6105M/LOM", 0x3107 : "VT8233/A AC97' Enhance Audio Controller", 0x3108 : "8237", 0x3109 : "VT8233/A AC97' Enhance Audio Controller", 0x3112 : "VT8361", 0x3113 : "", 0x3116 : "VT8375", 0x3118 : "CN400", 0x3119 : "VT6120/VT6121/VT6122", 0x3122 : "3122110", 0x3123 : "VT8623", 0x3128 : "vt8753", 0x3133 : "VT3133", 0x3147 : "VT8233", 0x3148 : "VT8751", 0x3149 : "VT8237 Family/ VT6421a", 0x3156 : "VT8372", 0x3157 : "VIA VT8237", 0x3158 : "", 0x3164 : "VT6410", 0x3168 : "VT8374", 0x3177 : "VT8235", 0x3178 : "", 0x3188 : "K8HTB-8237", 0x3189 : "VT8377", 0x3198 : "VEN_1106&DEV_B198&SUBSYS_00000000&REV_00", 0x3202 : "", 0x3204 : "1394 i2c", 0x3205 : "PCIVEN_1106&DEV_3432", 0x3208 : "PT890", 0x3209 : "", 0x3213 : "", 0x3227 : "VT8237R", 0x3230 : "K8M890CE & K8N890CE Display Driver", 0x3238 : "K8T890", 0x3249 : "VT6421", 0x3253 : "VT6655", 0x3258 : "PT880", 0x3259 : "???", 0x3269 : "KT880", 0x3282 : "K8T880Pro", 0x3288 : "040300", 0x3343 : "81CE1043", 0x3344 : "CN700", 0x3349 : "VT8251", 0x3365 : "060000A1106", 0x3371 : "P4M900", 0x3403 : "VT6315/VT6330", 0x3483 : "VL805-q6", 0x3680 : "pciven_1106&dev_3108_&subsys_4c211543_rev_803&13", 0x401A : "VT-6325", 0x4149 : "VT6420", 0x4204 : "???", 0x4238 : "K8T890", 0x4258 : "???", 0x4259 : "???", 0x4269 : "KT880", 0x4282 : "K8T880Pro", 0x4397 : "VT1708S", 0x5000 : "3059", 0x5030 : "VT82C596", 0x5308 : "PT880 Pro / VT8237", 0x5372 : "VT8237S", 0x6100 : "VIA VT86C100A", 0x6287 : "27611", 0x7064 : "SUBSYS_10020000", 0x7204 : "K8M400", 0x7205 : "KM400", 0x7238 : "K8T890", 0x7258 : "PT880", 0x7259 : "PM800", 0x7269 : "KT880", 0x7282 : "K8T880Pro", 0x7353 : "CX700", 0x7372 : "VT8237", 0x7565 : "473040005", 0x8208 : "PT890?", 0x8231 : "VT8231", 0x8235 : "VT8754", 0x8237 : "VT8237", 0x8305 : "VT8363A/65", 0x8391 : "VT8363/71", 0x8501 : "VT8501", 0x8596 : "VT82C596", 0x8597 : "VT82C597", 0x8598 : "VT82C598", 0x8601 : "VT82C601", 0x8602 : "", 0x8605 : "VT8605", 0x8691 : "VT82C691/693A/694X", 0x8693 : "VT82C693/A", 0x8920 : "3059", 0x9238 : "K8T890", 0x9398 : "VT8601", 0x9530 : "1106", 0x9875 : "1", 0x9876 : "VT8233/A AC97' Enhance Audio Controller", 0xA208 : "PT890", 0xA238 : "K8T890", 0xb01f : "castle rock agp8x controll", 0xB091 : "VT8633", 0xB099 : "VT8366/A", 0xB101 : "VT8653", 0xB102 : "VT8362", 0xB103 : "VT8615", 0xB112 : "VT8361", 0xB113 : "", 0xB115 : "VT8363/65", 0xB116 : "VT8375", 0xB133 : "vt686b", 0xB148 : "VT8751 Apollo", 0xB156 : "VT8372", 0xB158 : "VIA Technologies Inc", 0xB168 : "VT8235", 0xB188 : "K8M800/K8N800", 0xB198 : "546546", 0xB213 : "", 0xC208 : "PT890", 0xC238 : "K8T890", 0xD208 : "PT890", 0xD213 : 
"", 0xD238 : "K8T890", 0xE208 : "PT890", 0xE238 : "K8T890", 0xe721 : "104382EA", 0xe724 : "VT1705", 0xF208 : "PT890", 0xF238 : "K8T890", }, 0x1107 : { 0x8576 : "PCI Host Bridge", }, 0x1108 : { 0x0100 : "Token Ring Adapter", 0x0101 : "2-Port Token Ring Adapter", 0x0105 : "Token Ring Adapter", 0x0108 : "Token Ring Adapter", 0x0138 : "Token Ring Adapter", 0x0139 : "Token Ring Adapter", 0x013C : "Token Ring Adapter", 0x013D : "Token Ring Adapter", }, 0x1109 : { 0x1400 : "EX110TX PCI Fast Ethernet Adapter", }, 0x110A : { 0x2101 : "Multichannel Network Interface Controller for HDLC", 0x2102 : "DMA supported serial communication controller with 4 channels", 0x2104 : "PCI Interface for Telephony/Data Applications PITA-2", 0x3141 : "PROFIBUS Communication Processor CP5611 A2", 0x4033 : "EB400 ProfiNet Device-Kit", 0x4036 : "Siemens I/O Control", }, 0x110B : { 0x0001 : "Media Processor", 0x0002 : "MPACT DVD decoder.", 0x0004 : "Integrated video card", }, 0x1112 : { 0x2200 : "FDDI adapter", 0x2300 : "Fast Ethernet adapter", 0X2340 : "4 Port 10/100 UTP Fast Ethernet Adapter", 0x2400 : "ATM adapter", }, 0x1113 : { 0x1211 : " EN5038", 0x1216 : "accton EN5251BE", 0x1217 : "Ethernet Adapter", 0x5105 : "untuk install driver", 0x9211 : "Fast Ethernet Adapter", 0x9511 : "0445tabgf16143.1", 0x9876 : "Ethernet Controller/ drivers", }, 0x1114 : { 0x0506 : "802.11b Wireless Network Adaptor", 0x3202 : "TPM - Trusted Platform Module", }, 0x1116 : { 0x0022 : "DT3001", 0x0023 : "DT3002", 0x0024 : "DT3003", 0x0025 : "DT3004", 0x0026 : "Dt3005", 0x0027 : "DT3001-PGL", 0x0028 : "DT3003-PGL", }, 0x1117 : { 0x9500 : "max-lc SVGA card", 0x9501 : "MaxPCI image processing board", }, 0x1119 : { 0x0000 : "PCI SCSI RAID Controller", 0x0001 : "PCI 1-channel SCSI RAID Controller", 0x0002 : "PCI 1-channel SCSI RAID Controller", 0x0003 : "PCI 2-channel SCSI RAID Controller", 0x0004 : "PCI 3-channel SCSI RAID Controller", 0x0005 : "PCI 5-channel SCSI RAID Controller", 0x0006 : "Wide Ultra SCSI Controller", 0x0007 : "Wide Ultra SCSI Controller", 0x0008 : "Wide Ultra SCSI Controller", 0x0009 : "Wide Ultra SCSI Controller", 0x000A : "Ultra SCSI Controller", 0x000B : "Wide SCSI Controller", 0x000C : "Wide SCSI Controller", 0x000D : "Wide SCSI Controller", 0x0100 : "2 Channel Wide Ultra SCSI", 0x0101 : "Wide Ultra SCSI HBA", 0x0102 : "Wide Ultra SCSI HBA", 0x0103 : "Wide Ultra SCSI HBA", 0x0104 : "Ultra SCSI HBA", 0x0105 : "Ultra SCSI HBA", 0x0110 : "Wide Ultra SCSI HBA", 0x0111 : "Wide Ultra SCSI HBA", 0x0112 : "Wide Ultra SCSI HBA", 0x0113 : "Wide Ultra SCSI HBA", 0x0114 : "Ultra SCSI HBA", 0x0115 : "Ultra SCSI HBA", 0x0118 : "Wide Ultra2 SCSI HBA", 0x0119 : "Wide Ultra2 SCSI HBA", 0x011A : "Wide Ultra2 SCSI HBA", 0x011B : "Wide Ultra2 SCSI HBA", 0x0120 : "", 0x0121 : "", 0x0122 : "", 0x0123 : "", 0x0124 : "", 0x0125 : "", 0x0136 : "", 0x0137 : "Disk Array Controller", 0x0138 : "", 0x0139 : "0139", 0x013A : "IBM IXA - Integrated xSeries Adapter", 0x013B : "", 0x013C : "", 0x013D : "", 0x013E : "", 0x013F : "", 0x0166 : "", 0x0167 : "", 0x0168 : "64-bit PCI Wide Untra2 SCSI HBA", 0x0169 : "64-bit PCI Wide Ultra2 SCSI HBA", 0x016A : "64-bit PCI Wide Ultra2 SCSI HBA", 0x016B : "64-bit PCI Wide Ultra2 SCSI HBA", 0x016C : "", 0x016D : "", 0x016E : "", 0x016F : "", 0x01D6 : "GDT 4513RZ", 0x01D7 : "", 0x01db : "SCSI Ultra320 1-channel", 0x01F6 : "", 0x01F7 : "BtYVKixCnmzB", 0x01FC : "cfa-4k", 0x01FD : "", 0x01FE : "", 0x01FF : "", 0x0210 : "Fibre Channel HBA", 0x0211 : "Fibre Channel HBA", 0x0260 : "64-bit PCI Fibre Channel HBA", 0x0261 : 
"64-bit PCI Fibre Channel HBA", 0x0300 : "", 0x6111 : "61xx raid", }, 0x111A : { 0x0000 : "", 0x0002 : "", 0x0003 : "ATM Adapter", }, 0x111C : { 0x0001 : "Powerbus Bridge", }, 0x111D : { 0x0001 : "zqUh5f <a href=", 0x0003 : "MICRO ABR SAR PCI ATM Controller", 0x0004 : "MICRO ABR SAR PCI ATM Controller", 0x7603 : "IDT High Definition Audio CODEC", 0x7605 : "IDT High Definition Audio CODEC", 0x7608 : "IDT High Definition Audio CODEC", 0x7616 : "SigmaTel High Definition Audio CODEC", 0x7618 : "SigmaTel High Definition Audio CODEC", 0x7621 : "IDT High Definition Codec", 0x7634 : "IDT/Sigmae HDl Audio Driver v6.10.5939.0 05/06/2008", 0x7662 : "SigmaTel High Definition Audio CODEC", 0x7667 : "High Definition (HD) Audio Codecs", 0x7675 : "92HD73C1", 0x7680 : "SIGMATEL STAC 92XX ", 0x76A0 : "STAC 92XX C-Major HD Audio (Dell Precision M4300 and LAT D630 & D830)", 0x76B2 : "IDT Audio", 0x76D1 : "IDT High Definition Audio CODEC", 0x76D5 : "IDT 92HD87B1/3", 0x76D9 : "hp IDT Audio Codec", 0x76E7 : "HDAUDIO", 0x8018 : "PCI Express Switch", 0x802d : "PCI Express Switch PES16T7", 0x806e : "PCI Express Gen2 Switch", 0x8086 : "NICStAR ATM Adapter", 0x9876 : "IDT/Sigmatel HDl Audio Driver v6.10.5939.0 05/06/2008", }, 0x111F : { 0x4A47 : "Video engine interface", 0x5243 : "Frame Capture Bus Interface", }, 0x1127 : { 0x0200 : "ATM", 0x0210 : "ATM", 0x0250 : "ATM", 0x0300 : "ATM adapter", 0x0310 : "ATM", 0x0400 : "ATM Adapter", 0x1603 : "atm", }, 0x112D : { 0x8086 : "pci simple controller ", }, 0x112E : { 0x0000 : "EIDE/hdd and IDE/cd-rom Ctrlr", 0x000B : "EIDE/hdd and IDE/cd-rom Ctrlr", }, 0x1130 : { 0xF211 : "USB Audio Sound Card", }, 0x1131 : { 0x0011 : "Ethernet Controller", 0x1001 : "BlueTooth &#1040;&#1076;&#1072;&#1087;&#1090;&#1077;&#1088; ISSCBTA [Tripper USB Dongle]", 0x1131 : "VerTV Hybrid Super 007 M135RA", 0x1131 : "01384E42y8", 0x1201 : "VPN IPSEC coprocessor", 0x1234 : "EHCI USB 2.0 Controller", 0x1301 : "SSL Accelerator", 0x1562 : "EHCI USB 2.0 Controller", 0x1996 : "01384E42y8", 0x2780 : "StreamLVTVTune ", 0x3400 : "Modem", 0x3401 : "Multimedia Audio Device", 0x5400 : "Multimedia processor", 0x5400 : "Multimedia processorkk", 0x5402 : "Media Processor", 0x5406 : "TriMedia PNX1700", 0x7130 : "01384E42", 0x7133 : "PCI audio and video broadcast decoder or only avertv dvb-t pci card", 0x7134 : "SAA7134 TV Card Philips", 0x7145 : "ddddf", 0x7146 : " 0X7146", 0x7160 : " TDA10046 and TDA8275A", 0x7162 : "idk", 0x7164 : "ASUS My Cinnema PE9400 PCI-E 1x capture card.", 0x7231 : "AVerMedia H339 &#1043;&#1080;&#1073;&#1088;&#1080;&#1076;&#1085;&#1099;&#1081; &#1040;&#1085;&#1072;", 0x9730 : "Ethernet controller", 0x9876 : "saa7146ah", 0xFFFF : "device", }, 0x1133 : { 0x7711 : "", 0x7901 : "", 0x7902 : "", 0x7911 : "", 0x7912 : "", 0x7941 : "", 0x7942 : "", 0x7943 : "EiconCard S94", 0x7944 : "EiconCard S94", 0xB921 : "", 0xB922 : "", 0xB923 : "EiconCard P92", 0xE001 : "Pro 2.", 0xE002 : "", 0xE003 : "", 0xE004 : "chip", 0xE005 : "Eicon ISDN card using Siemens IPAC chip", 0xE00B : "Eicon ISDN card using Infineon chip", 0xE010 : "DIVA Server BRI-2M", 0xE012 : "DIVA Server BRI-8M", 0xE013 : "DIVA Server 4BRI/PCI", 0xE014 : "DIVA Server PRI-30M", 0xE015 : "Diva Server PRI-30M PCI v.2", 0xE018 : "DIVA Server BRI-2M/-2F", }, 0x1134 : { 0x0001 : "audio driver", 0x0002 : "Dual PCI to RapidIO Bridge", 0x9876 : "audio driver", }, 0x1135 : { 0x0001 : "Printer Cntrlr", }, 0x1138 : { 0x8905 : "STD 32 Bridge", }, 0x113C : { 0x0000 : "i960 Bridge", 0x0001 : "i960 Bridge / Evaluation Platform", 0x0911 : "i960Jx I/O 
Controller", 0x0912 : "i960Cx I/O Controller", 0x0913 : "i960Hx I/O Controller", 0x0914 : "I/O Controller with secondary PCI bus", }, 0x113F : { 0x0808 : "Adapter", 0x1010 : "Adapter", 0x80C0 : "", 0x80C4 : "", 0x80C8 : "", 0x8888 : "", 0x9090 : "", }, 0x1141 : { 0x0001 : "EIDE/ATAPI super adapter", }, 0x1142 : { 0x3210 : "VGA/AVI Playback Accelerator", 0x6410 : "GUI Accelerator", 0x6412 : "GUI Accelerator", 0x6420 : "GUI Accelerator", 0x6422 : "ProMotion-6422", 0x6424 : "ProMotion AT24 GUI Accelerator", 0x6425 : "0752 20005", 0x6426 : "GUI Accelerator", 0x643D : "ProMotion-AT3D", 0x9876 : "139K76B 9808", 3210 : "139K76B", }, 0x1144 : { 0x0001 : "Noservo Cntrlr", }, 0x1145 : { 0xF020 : "CardBus ATAPI Host Adapter", 0xF021 : "CardBus CompactFlash Adapter", 0xf024 : "CardBus CompactFlash Adapter", }, 0x1147 : { 0x1123 : "131dq", }, 0x1148 : { 0x4000 : "FDDI adapter", 0x4200 : "Token Ring Adapter", 0x4300 : "SK-NET Gigabit Ethernet Adapter", 0x4320 : "SysKonnect Marvel RDK 8001", 0x4362 : "Marvell Yukon 88E8053 based Ethernet Controller", 0x9000 : "PCI-X 10/100/1000Base-T Server", 0x9E00 : "PCI Express 10/100/1000Base-T Desktop", }, 0x114A : { 0x5565 : "Ultrahigh-Speed Fiber-Optics Reflective Memory w/ Interrupts", 0x5579 : "Reflective Memory Card", 0x5588 : "VMICPCI5588 Reflective Memory Card", 0x6504 : "Timer/SRAM FPGA", 0x7587 : "", }, 0x114D : { 0x2189 : "PCTel HSP56 PCI Modem", }, 0x114F : { 0x0002 : "ACPINSC6001", 0x0003 : "", 0x0004 : "driver", 0x0005 : "", 0x0006 : "", 0x0007 : "Digi Data Fire PCI 1 S/T", 0x0009 : "", 0x000A : "", 0x000C : "", 0x000D : "X.25/FR 2-port", 0x0011 : "", 0x0012 : "", 0x0013 : "", 0x0014 : "", 0x0015 : "", 0x0016 : "", 0x0017 : "", 0x0019 : "", 0x001A : "", 0x001B : "", 0x001D : "T1/E1/PRI", 0x001F : "ClydeNonCsu6034", 0x0020 : "ClydeNonCsu6032", 0x0021 : "ClydeNonCsu4", 0x0022 : "ClydeNonCsu2", 0x0023 : "", 0x0024 : "", 0x0026 : "", 0x0027 : "", 0x0029 : "", 0x0034 : "", 0x0035 : "T1/E1/PRI", 0x0040 : "", 0x0042 : "", 0x0070 : "", 0x0071 : "Descargar", 0x0072 : "", 0x0073 : "", 0x00c8 : "Digi Neo 2", 0x6001 : "ACPIVEN_HPQ&DEV_6001", }, 0x1155 : { 0x0810 : "486 CPU/PCI Bridge", 0x0922 : "Pentium CPU/PCI Bridge", 0x0926 : "PCI/ISA Bridge", }, 0x1158 : { 0x3011 : "Tokenet/vg 1001/10m anylan", 0x9050 : "Lanfleet/Truevalue", 0x9051 : "Lanfleet/Truevalue", }, 0x1159 : { 0x0001 : "", 0x0002 : "Frame Grabber", }, 0x115D : { 0x0003 : "Cardbus Ethernet 10/100+Modem 56", 0x0005 : "CardBus Ethernet 10/100", 0x0007 : "CardBus Ethernet 10/100", 0x000B : "CardBus Ethernet 10/100", 0x000C : "Mini-PCI V.90 56k Modem", 0x000F : "CardBus Ethernet 10/100", 0x002b : "Winmodem built into NEC Versa VXi", 0x0076 : "Xircom MPCI3B-56G (Lucent SCORPIO) Soft", 0x00d3 : "Xircom MPCI Modem 56", 0x00D4 : "Modem 56k", 0x0101 : "CardBus 56k Modem", 0x0103 : "CardBus Ehternet + 56k Modem", }, 0x1161 : { 0x0001 : "Host Bridge", }, 0x1163 : { 0x0001 : "3D Blaster", 0x2000 : "Rendition V2200 (BLITZ 2200 AGP)", }, 0x1165 : { 0x0001 : "Motion JPEG rec/play w/audio", 0x0060 : "Foresight Imaging I-Color", 0x0088 : "AccuStream 50a", }, 0x1166 : { 0x0005 : "PCI to PCI Bridge", 0x0006 : "Host Bridge", 0x0007 : "CPU to PCI Bridge", 0x0008 : "Hostbridge & MCH", 0x0009 : "AGP interface", 0x0010 : "", 0x0011 : "", 0x0012 : "", 0x0013 : "Hostbridge and MCH", 0x0014 : "Host Bridge", 0x0015 : "Hostbridge and MCH", 0x0016 : "Host Bridge", 0x0017 : "", 0x0101 : "", 0x0103 : " ", 0x0110 : "I/O Bridge with Gigabit Ethernet ServerWorks Grand Champion", 0x0200 : "PCI to ISA Bridge", 0x0201 : "ISA bridge", 0x0203 
: "PCI to ISA Bridge", 0x0211 : "PATA33 Controller", 0x0212 : "PATA66", 0x0213 : "PATA100 RAID Controller", 0x0217 : "PATA100 IDE Controller", 0x0220 : "OpenHCI Compliant USB Controller", 0x0221 : "OHCI Compliant USB Controller", 0x0223 : "USB controller", 0x0225 : "PCI Bridge", 0x0227 : "PCI Bridge", 0x0230 : "PCI to ISA bridge", 0x0240 : "Apple K2 SATA AHCI&RAID Controller", 0x0241 : "ServerWorks Frodo4 SATA RAID Controller", 0x0242 : "ServerWorks Frodo8 8xSATA RAID", 0x024A : "Broadcom5785/Serverworks HT1000 AHCI Controller", 0x024B : "BC5785/ServerWorks HT1000 SATA(IDE MODE)", 0x0252 : "ServerWorks Elrond 8xSAS/SATAII", }, 0x1168 : { 0x7145 : "ATI Mobility Radeon X 1400", }, 0x1169 : { 0x0102 : "32 Channel Digital Input Card Interface", 0x0202 : "16 Channel Digital Output", 0x0302 : "32 Channel Analog Input Interface", 0x0402 : "16 Channel Analog Output / Analog Input Interface", 0x0502 : "8 Channel Timer Counter Interface", 0x0902 : "PCI to TigerSHARC FPGA Interface", 0x2001 : "PCI to C-DAC RTU bus interface FPGA", }, 0x116A : { 0x6100 : "", 0x6800 : "", 0x7100 : "", 0x7800 : "nvidia harmony", }, 0x116E : { 0x0015 : "Fiery EX2000D RIP Card Melbourne VX120", 0x0500 : "Printer ASIC", }, 0x1172 : { 0x0001 : "S CCA5000243A", 0x0004 : "Multi-serial card", 0x0007 : "Altera FPGA board", 0x1234 : "Stratix V FPGA", 0xD4AA : "Arria GX", }, 0x1176 : { 0x8474 : "Conexant Multichannel Synchronous Communications Controller (MUSYCC)", }, 0x1178 : { 0xAFA1 : "Fast Ethernet", }, 0x1179 : { 0x8136 : "Realtek 10/100/1000 PCI-E NIC Family", 0x0102 : "Trusted Platform Module", 0x0103 : "Extended PCI IDE Controller Type-B", 0x0117 : "PCIVEN_8086&DEV_3B03&SUBSYS_02FE1028&REV_053&11583659&0&F8", 0x0201 : "Ralink Chipset 802.11b/g WLAN Card", 0x0404 : "", 0x0406 : "Video Capture device", 0x0407 : "NVIDIA GeForce 8600M GT", 0x051D : "ACPI", 0x0601 : "Toshiba CPU to PCI bridge", 0x0602 : "PCI to ISA Bridge for Notebooks", 0x0603 : "PCI to CardBus Bridge for Notebooks", 0x0604 : "PCI to PCI Bridge for Notebooks", 0x0605 : "PCI to ISA Bridge for Notebooks", 0x0606 : "PCI to ISA Bridge for Notebooks", 0x0609 : "PCI to PCI Bridge for Notebooks", 0x060A : "Toshiba ToPIC95 CardBus Controller", 0x060F : "CardBus Controller", 0x0611 : "PCI-ISA Bridge", 0x0617 : "PCI to CardBus Bridge with ZV support", 0x0618 : "CPU to PCI and PCI to ISA Bridge", 0x0701 : "PCI Communication Device", 0x0804 : "Toshiba Smart Media Host Controller", 0x0805 : "ACPIASD00012&DABA3FF&1", 0x0D01 : "FIR Port Type-O", 0x1179 : "Dispositivo de comunicaciones pci", 0x13A8 : "Multi-channel PCI UART", 0x168 : "Qualcomm Atheros AR9485WB-EG wireless Network Adapter", 0x3b64 : "Management Engine Driver", 0x8136 : "pciven_10 EC", 0x9876 : "SD Card Controller", }, 0x117B : { 0x8320 : "VGA", }, 0x117C : { 0x0030 : "Dual-Channel Low-Profile Ultra320 SCSI PCIe Host Bus Adapter", 0x0042 : "Low-Profile 16-Internal Port 6Gb/s SAS/SATA PCIe 2.0 Host Bus Adapter", }, 0x117E : { 0x0001 : "Printer Host", }, 0x1180 : { 0x0475 : "RL5c592", 0x0476 : "RL5c476 II", 0x0478 : "RB5c478", 0x0552 : "R5C552", 0x0575 : "44192", 0x059 : "1", 0x0592 : "0880", 0x0822 : "R5C832", 0x0832 : "ACPIENE01004&15458EF3&0", 0x0843 : "022E1028", 0x0847 : "delete", 0x0852 : "R5C852", 0x1108 : "30CF", 0x2792 : "0x8086", 0x5551 : "Unknown", 0x852 : "01cf1028 ", 0x9876 : "CC_088000", 0x9876 : "R5C853", 0xE203 : "0592", 0xE230 : "9086104D", 0xe476 : "Ricoh R5C843", 0xe822 : "R5U822", 0xe823 : "R5U822", 0xe832 : "R5U832", 0xE852 : "Uknown", }, 0x1185 : { 0x8929 : "EIDE Controller", }, 0x1186 : { 
0x0100 : "Ethernet Adapter", 0x1002 : "Fast Ethernet Adapter", 0x1100 : "Fast Ethernet Adapter", 0x1300 : "Realtek RTL8139 Family PCI Fast Ethernet Adapter", 0x1301 : "Fast Ethernet Adapter", 0x1340 : "Fast Ethernet CardBus PC Card", 0x1561 : "CardBus PC Card", 0x3065 : "D-Link DFE-500Tx PCI fast Ethernet adapter Re v.A", 0x3106 : "Fast Ethernet Adapter", 0x3300 : "IEEE 802.11g PCI card", 0x3b00 : "D-LINK DWL-650+", 0x3c09 : "Ralink RT61", 0x4000 : "Gigabit Ethernet Adapter", 0x4001 : "D Link Fast Ethernet PCMCIA Card", 0x4200 : "-", 0x4300 : "Used on DGE-528T Gigabit adaptor", 0x4302 : "DGE-530T", 0x4b00 : "D-Link System Inc DGE-560T PCI Express Gigabit Ethernet Adapter (rev 13)", 0x4B01 : "Gigabit Ethernet Adapter", 0x4C00 : "Gigabit Ethernet Adapter", 0x9876 : "d", }, 0x1189 : { 0x1592 : "VL/PCI Bridge", }, 0x118C : { 0x0014 : "C-bus II to PCI bus host bridge chip", 0x1117 : "Corollary/Intel Memory Controller Chip", }, 0x118D : { 0x0001 : "Raptor-PCI framegrabber", 0x0012 : "Road Runner Frame Grabber", 0x0014 : "Road Runner Frame Grabber", 0x0024 : "Road Runner Frame Grabber", 0x0044 : "Road Runner Frame Grabber", 0x0112 : "Road Runner Frame Grabber", 0x0114 : "Road Runner Frame Grabber", 0x0124 : "Road Runner Frame Grabber", 0x0144 : "Road Runner Frame Grabber", 0x0212 : "Road Runner Frame Grabber", 0x0214 : "Road Runner Frame Grabber", 0x0224 : "Road Runner Frame Grabber", 0x0244 : "Road Runner Frame Grabber", 0x0312 : "Road Runner Frame Grabber", 0x0314 : "Road Runner Frame Grabber", 0x0324 : "Road Runner Frame Grabber", 0x0344 : "Road Runner Frame Grabber", }, 0x118E : { 0x0042 : "", 0x0142 : "", 0x0242 : "", 0x0342 : "", 0x0440 : "", 0x0442 : "", 0x0842 : "red", }, 0x1190 : { 0x2550 : "Single Chip Ultra (Wide) SCSI Processor", 0xC721 : "EIDE", 0xC731 : "PCI Ultra (Wide) SCSI Adapter", }, 0x1191 : { 0x0001 : "IDE Ctrlr", 0x0002 : "UltraDMA33 EIDE Controller (AEC6210UF)", 0x0003 : "SCSI-2 cache Cntrlr", 0x0004 : "UltraDMA33 EIDE Controller", 0x0005 : "UltraDMA33 EIDE Controller (AEC6210UF)", 0x0006 : "UltraDMA66 EDIE Controller (AEC6260)", 0x0007 : "UltraDMA66 EIDE Controller (AEC6260)", 0x0008 : "2CH PCI UltraDMA133 IDE Controller", 0x0009 : "AEC6280PATA133|AEC6880 PATA RAID|AEC6290 SATA|AEC6890 SATA RAID|AEC6891 SATA RAID", 0x000a : "ACARD AEC-6885/6895/6896 RAID Controller", 0x000B : "ACARD AEC-6897/6898 RAID Controller", 0x000D : "2S1P PCI-X SATA(3G)/UDMA Combo Controller", 0x8001 : "SCSI-2 RAID (cache?) 
Adapter (AEC6820U)", 0x8002 : "AEC6710S/U/UW SCSI-2 Host Adapter ", 0x8010 : "Ultra Wide SCSI Controller", 0x8020 : "AEC6712U/TU Ultra SCSI Controller", 0x8030 : "AEC 6712S/TS Ultra SCSI Controller", 0x8040 : "SCSI Controller", 0x8050 : "AEC6715UW Ultra Wide SCSI Controller", 0x8060 : "SCSI Host Adapter/PAYPAL.COM/X.COM", 0x8081 : "PCI Ultra160 LVD/SE SCSI Adapter", 0x808A : "AEC6710S/U/UW SCSI-2 Host Adapter", }, 0x1197 : { 0x010C : "8-bit 2GS/s Analog Input Card", }, 0x1199 : { 0x0001 : "IRMA 3270 PCI Adapter", 0x0002 : "Advanced ISCA PCI Adapter", 0x0201 : "SDLC PCI Adapter", }, 0x119B : { 0x1221 : "PCI PCMCIA bridge", }, 0x119E : { 0x0001 : "FireStream 155 ATM adapter", 0x0003 : "FireStream 50 ATM adapter", }, 0x11A8 : { 0x7302 : "NTX-8023-PCI 2MB Long Card", 0x7308 : "NTX-8023-PCI 8MB Long Card", 0x7402 : "NTX-8023-PCI 2MB Short Card", 0x7408 : "NTX-8023-PCI 8MB Short Card", }, 0x11A9 : { 0x4240 : "pci matchmaker 9622qac", }, 0x11AB : { 0x0028 : "MCP67 High Definition Audio", 0x0146 : "System Ctrlr for R4xxx/5000 Family CPUs", 0x11AB : "Gigabit Ethernet Controller", 0x11AB : "Marvell Yukon 88E8055 PCI-E Gigabit Ethernet Controller", 0x11AB : "Gigabit Ethernet Controller", 0x13F8 : "802.11 Adapter", 0x1fa6 : "The Libertas WLAN 802.11b/g", 0x1FA7 : "Libertas WLAN 802.11b/g", 0x1fa8 : "54M Wireless 802.11b PCI wifi Adapter Card", 0x1FAA : "Marvell Libertas 802.11 b/g Wireless (8335)", 0x2A30 : "PCI-Express 802.11bg Wireless", 0x4320 : "Marvell Yukon PCI E Gigabit drivers for d", 0x4350 : "Yukon PCI-E Fast Ethernet Controller", 0x4351 : "Yukon PCI-E Fast Ethernet Controller", 0x4352 : "Marvell Yukon 88E8038 PCI-E Fasvt Ethernet Controller", 0x4353 : "88E8039 PCIe Fast Ethernet Controller", 0x4354 : "Marvell Yukon 88E8040 PCI-E Fast Ethernet Controller", 0x4355 : "Marvell Yukon 88E8040T PCI-E Fast Ethernet Controller", 0x4357 : "marvell ethernet lan No painel ", 0x4360 : "Yukon PCI-E ASF Gigabit Ethernet Controller", 0x4361 : "Marvell Yukon 88E8036 Network Card", 0x4362 : "Marvell Yukon 88E8053 PCI-E Gigabit Ethernet Controller", 0x4363 : "Yukon PCI-E Gigabit Ethernet Controller", 0x4364 : "Yukon PCI-E Gigabit Ethernet Controller", 0x4365 : "Yukon Gigabit Controller DRIVER", 0x436A : "Marvell Yukon 88E8058", 0x436b : "Marvell Yukon 8072", 0x436b : "Marvell Yukon PCI-E Gigabit Ethernet Controller", 0x436C : "Marvell 8072 Ethernet Nic", 0x4380 : "Marvell Yukon 88E8057 PCI-E Gigabit Ethernet Controller", 0x4381 : "Marvell Yukon 88E8059 PCI-E Gigabit Ethernet Controller", 0x4611 : "System Controller", 0x4620 : "System Controller for R5000 & R7000 (64-bit PCI)", 0x4801 : "8 port switched ethernet ctrlr", 0x4809 : "Evaluation board for the GT-48300", 0x5005 : "Belkin Desktop Gigabit PCI card", 0x5040 : "4-port SATA I PCI-X Controller", 0x5041 : "4-port SATA I PCI-X Controller", 0x5080 : "SATA Controller", 0x5081 : "SATA Controller", 0x6041 : "Marvell Technology Group Ltd. 
MV88SX6041 4-port SATA II PCI-X Controller (rev 03)", 0x6081 : "PCI-X RocketRAID 222x SATA Controller", 0x6101 : "PATA 133 One Channel", 0x6111 : "61xx RAID", 0x6120 : "61xx RAID", 0x6121 : "61xx AHCI", 0x6122 : "61xx RAID", 0x6140 : "61xx RAID", 0x6145 : "Marvell 6145 SATA II PCI-E Controller 4 SATA2 300MB/s ports - Thor 4S/1P", 0x6320 : "System Controller for PowerPC Processors", 0x6440 : "64xx/63xx SAS", 0x6480 : "PowerPC System Controller", 0x6485 : "Marvell 88SE6480", 0x9128 : "SATA3 6 GB/s SATA3/Raid Controller", 0x91A2 : "Sata 6G RAID Controller", 0x9653 : "Advanced Communication Controller", 0x9876 : "marvell yukon 88E8038 pci-e fast ethernet controller", 0xF003 : "Primary Image Piranha Image Generator", 0xF004 : "Primary Image Barracuda Image Generator", 0xF006 : "Primary Image Cruncher Geometry Accelerator", 0xFFFF : "PATA2SATA/SATA2PATA Bridge", }, 0x11AD : { 0x0001 : "Fast Ethernet Adapter", 0x0002 : "NETGEAR FA310TX Fast Ethernet PCI Adapter", 0xC115 : "PNIC II PCI MAC/PHY", }, 0x11AE : { 0x4153 : "Bridge Controller", 0x5842 : "Bridge Controller", }, 0x11AF : { 0x0001 : "9704", 0x000A : " ", 0x000B : " ", }, 0x11B0 : { 0x0001 : "i960 Local Bus to PCI Bridge", 0x0002 : "i960Jx Local Bus to PCI Bridge", 0x0004 : "i960Cx/Hx Local Bus to PCI Bridge", 0x0010 : "Am29K Local Bus to PCI Bridge", 0x0021 : "i960Sx Local Bus to PCI Bridge", 0x0022 : "i960Jx Local Bus to PCI Bridge", 0x0024 : "i960Cx/Hx Local Bus to PCI Bridge", 0x0030 : "Am29K Local Bus to PCI Bridge", 0x0100 : "PCI System Ctrlr for 32-bit MIPS CPU", 0x0101 : "PCI System Ctrlr for 32-bit MIPS CPU", 0x0102 : "PCI System Ctrlr for Super-H SH3 CPU", 0x0103 : "PCI System Ctrlr for Super-H SH4 CPU", 0x0200 : "High Performance PCI SDRAM Controller", 0x0292 : "Am29030/40 Bridge", 0x0500 : "PCI System Ctrlr for 64-bit MIPS CPU", 0x0960 : "i960 Bridges for i960 Processors", 0x4750 : "SCRAMNet", 0xC960 : "i960 Dual PCI Bridge", }, 0x11B5 : { 0x0001 : "1553 Bus Interface Card", 0x0002 : "FLASH memory Card", 0x0003 : "Multi Media Adapter", 0x0004 : "Video Graphics Overlay", 0x0005 : "PPzero Slave Interface Card", 0x0006 : "PPzero Master Interface Card", 0x0007 : "Serial/1553 Interface Card", 0x0008 : "Intelligent Serial/Ethernet Card", 0x0009 : "Parallel I/O Module", 0x000a : "Fibre Channel Adapter", 0x000b : "High Speed DSP Gateway Module", 0x000c : "Memory Adaptor Module", 0x0012 : "FLASH memory Card (V2)", 0x0013 : "1553 Bus Interface Card", 0x0014 : "1553 Bus Interface Card", 0x2200 : "Dual Fibre Channel Adapter", }, 0x11B8 : { 0x0001 : "", }, 0x11B9 : { 0xC0ED : "", }, 0x11BC : { 0x0001 : "PCI FDDI", }, 0x11BD : { 0x0015 : "rob2d", 0x1158 : "Tuner Royal TS 1", 0x2020 : "70009823/76199706", 0xBEDE : "Pinnacle Studio 700 PCI", }, 0x11C1 : { 0x0440 : "Data+Fax+Voice+DSVD", 0x0441 : "modem driver", 0x0442 : "LT WinModem 56K Data+Fax", 0x0443 : "1646T00", 0x0444 : "845G", 0x0445 : "", 0x0446 : "PCIVEN_10DE&DEV_03d1&subsys_26011019&rev_a23&2411e6fe&0&68", 0x0447 : "windowsme", 0x0448 : "SV2P2", 0x0449 : "0449144F", 0x044A : "pci ven_1904", 0x044B : "USBVID_13FD&PID_1650&REV_0446", 0x044C : "SV95PL-TOO", 0x044D : "", 0x044E : "LT WinModem 56k Data+Fax or Agere F-1156IV/A3", 0x044F : "LT V.90+DSL WildFire Modem", 0x0450 : "LT Winmodem 56K", 0x0451 : "LT WinModem 56k Data+Fax+Voice+DSVD", 0x0452 : "1513144", 0x0453 : "", 0x0454 : "", 0x0455 : "", 0x0456 : "", 0x0457 : "", 
0x0458 : "Mars 3 Mercury v.92 v.44", 0x0459 : "", 0x045A : "", 0x045D : "mars2", 0x0461 : "V90 Wildfire Modem", 0x0462 : "56K.V90/ADSL Wildwire Modem", 0x0464 : "Lucent Wildwire v.90 + DSL modem", 0x0480 : "56k.V90/ADSL Wildfire Modem ", 0x048b : "creative modem blaster di5733-1", 0x048C : "net-comm modem", 0x048d : "9m56pml-g", 0x048E : "56k V.92modem", 0x048F : "Agere PCI Soft Modem. SV92PL", 0x0540 : "", 0x0600 : "SV92P-T00 Agere PCI Soft Modem. SV92PL", 0x0620 : "Agere PCI Soft Modem ", 0x0630 : "#1: 32 pins", 0x1040 : "Agere Systems HDA Modem", 0x11c1 : "Agere Systems HDA", 0x3026 : "Agere Modem", 0x3055 : "Agere Systems HDA Modem v6081", 0x4758 : "Mach64 GX", 0x5400 : "FPSC FPGA with 32/64bit", 0x5801 : "USB Open Host Controller", 0x5802 : "2-port PCI-to-USB OpenHCI Host Ctrlr", 0x5803 : "QuadraBus 4-port USB OpenHCI Host Ctrlr", 0x5805 : "USB Advanced Host Controller", 0x5811 : "1394A PCI PHY/Link Open Host Ctrlr I/F", 0x5901 : "firewire chip for macbook pro", 0x9876 : "LT WinModem 56K Data+Fax", 0xAB20 : "PCI Wireless LAN Adapter", 0xAB30 : "Mini-PCI WaveLAN a/b/g", 0xED00 : "PCI-E Ethernet Controller", 7121 : "", }, 0x11C6 : { 0x3001 : "VM-1200 Opto Unit Controller", }, 0x11C8 : { 0x0658 : "32 bit ", 0xD665 : "64 bit ", 0xD667 : "64 bit ", }, 0x11C9 : { 0x0010 : "16-line serial port w/DMA", 0x0011 : "4-line serial port w/DMA", }, 0x11CB : { 0x2000 : "port small IC", 0x4000 : "XIO/SIO Host", 0x8000 : "Bridge RIO Host", }, 0x11CE : { 0x102B : "FF00102B", }, 0x11D1 : { 0x01F7 : "PCI Video Processor", 0x01F8 : "PCI Video Processor", 0x01f9 : "tuner card", }, 0x11D4 : { 0x11D4 : "1986", 0x11d4 : "266e&subsys", 0x1535 : "ADSP-21535", 0x1805 : "62412-51", 0x1884 : "AD1884HD", 0x1889 : "AD1980", 0x194A : "AD1984A", 0x1981 : "7037", 0x1983 : "AD1983HD", 0x1984 : "Analog Devices ADI 1984", 0x1986 : "ADI1986A", 0x1988 : "AD1981", 0x198B : "AD1988B", 0x2192 : "ADSP-2192", 0x219A : "ADSP-2192", 0x219E : "ADSP-2192", 0x2F44 : "ADSP-1882", 0x989B : "AD1989B", }, 0x11D5 : { 0x0115 : "Versatec Parallel Interface (VPI) + Centronics", 0x0116 : "DR11-W emulator", 0x0117 : "Versatec Parallel Interface (VPI) + Centronics", 0x0118 : "DR11-W emulator", }, 0x11DA : { 0x2000 : "Virtual-Bus / AlacrityVM bridge", }, 0x11DB : { 0x1234 : "Dreamcast Broadband Adapter", }, 0x11DE : { 0x6057 : "Mc3aDo <a href=", 0x6067 : "zoran", 0x6120 : "MPEG VideoBVPSXI Capture Card", 0x6057 : "ZORAN PCI Bridge (interface for transferring video across the PCI bus)", 0x9876 : "", }, 0x11EC : { 0x0028 : "MCP67 High Definition Audio", 0x2064 : "", }, 0x11F0 : { 0x2772 : "PCIVEN_8086&DEV_2772&SUBSYS_0CCB105B&REV_023&2411E6FE&0&10", 0x4 : "PCIVEN_8086&DEV_2772&SUBSYS_0CCB105B&REV_023&2411E6FE&0&10", 0x4231 : "2", 0x4232 : "PCIVEN_8086&DEV_2772&SUBSYS_0CCB105B&REV_023&2411E6FE&0&10", 0x4233 : "", 0x4234 : "", 0x4235 : "", 0x4236 : "", 0x4731 : "Gigabit Ethernet Adapter", 0x9876 : "2", }, 0x11F4 : { 0x2915 : "", }, 0x11F6 : { 0x0112 : "ReadyLink ENET100-VG4", 0x0113 : "FreedomLine 100", 0x1401 : "ReadyLink RL2000", 0x2011 : "ReadyLink RL100ATX/PCI Fast Ethernet Adapter", 0x2201 : "ReadyLink 100TX (Winbond W89C840)", 0x9881 : "ReadyLink RL100TX Fast Ethernet Adapter", }, 0x11F8 : { 0x7364 : "FREEDM-32 Frame Engine & Datalink Mgr", 0x7366 : "FREEDM-8 Frame Engine & Datalink Manager", 0x7367 : "FREEDM-32P32 Frame Engine & Datalink Mgr", 0x7375 : "LASAR-155 ATM SAR", 0x7380 : "FREEDM-32P672 Frm Engine & Datalink Mgr", 0x7382 : "FREEDM-32P256 Frm Engine & Datalink Mgr", 0x7384 : "FREEDM-84P672 Frm Engine & Datalink Mgr", 0x8000 : "6G 
SAS/SATA Controller", 0x8010 : "6G SAS/SATA RAID Controller", }, 0x11FB : { 0x0417 : "PCI-417 High Speed A/D Board", }, 0x11FE : { 0x0001 : "", 0x0002 : "", 0x0003 : "", 0x0004 : "", 0x0005 : "", 0x0006 : "", 0x0007 : "", 0x0008 : "", 0x0009 : "", 0x000A : "", 0x000B : "", 0x000C : "", 0x000D : "", 0x8015 : "4-port UART 16954", }, 0x1202 : { 0x0001 : "PCI ATM Adapter", }, 0x1203 : { 0x0001 : "Unknown", }, 0x1204 : { 0x9876 : "wwDW", }, 0x1208 : { 0x4853 : "HS-Link Device", }, 0x1209 : { 0x0100 : "PLX PCI BRIDGE", }, 0x120E : { 0x0100 : "Multiport Serial Card", 0x0101 : "Multiport Serial Card", 0x0102 : "Multiport Serial Card", 0x0103 : "Multiport Serial Card", 0x0104 : "Multiport Serial Card", 0x0105 : "Multiport Serial Card", 0x0200 : "Intelligent Multiport Serial", 0x0201 : "Intelligent Serial Card", 0x0300 : "1105", 0x0301 : "", 0x0302 : "", 0x0303 : "teclado", }, 0x120F : { 0x0001 : "", }, 0x1210 : { 0x25f4 : "No data", }, 0x1216 : { 0x0003 : "PTM400 PCI Taxi Module", }, 0x1217 : { 0x00f7 : "1394 Open Host Controller Interface", 0x1217 : "111111111", 0x6729 : "PCI to PCMCIA Bridge", 0x673A : "PCI to PCMCIA Bridge", 0x6832 : "CardBus Controller", 0x6836 : "CardBus Controller", 0x6872 : "CardBus Controller", 0x6925 : "CardBus Controller", 0x6933 : "CardBus Controller", 0x6972 : "CardBus Controller", 0x7110 : "MemoryCardBus Accelerator", 0x7112 : "", 0x7113 : "PCMCIA/SmartCardBus Contoller", 0x7114 : "CardBus Controller", 0x7120 : "O2Micro Integrated MMC/SD controller", 0x7130 : "O2Micro Integrated MMC/SD/MS/xD/SM Controller", 0x7134 : "MemoryCardBus Controller 6-in-1", 0x7135 : "MemoryCardBus Contoller", 0x7136 : "O2Micro CardBus Controller", 0x71E2 : "", 0x7212 : "", 0x7213 : "", 0x7222 : "pci to pcmcia bridge", 0x7223 : "MemoryCardBus Controller", 0x8130 : "o2 sd card reader", 0x8231 : "O2Micro OZ600XXX Memory Card ", 0x8330 : "Mass storage controller [0180]", 0x8331 : "O2Micro Integrated MS/PRO controller", }, 0x121A : { 0003 : "", 003 : "", 0x0001 : "Voodoo 3D Acceleration Chip", 0x0002 : "Voodoo 2 3D Accelerator", 0x0003 : "Voodoo Banshee", 0x0005 : "All Voodoo3 chips", 0x0007 : "", 0x0009 : "AGP X2", 0x0010 : "Rev.A AGPx4", 0x0057 : "Avenger", }, 0x1220 : { 0x1220 : "AMCC 5933 TMS320C80 DSP/Imaging Board", 0x4242 : "controller audio multimediale", }, 0x1223 : { 0x0001 : "Real-Time Processing Blade in a standard single-slot AdvancedTCA formfactor", 0x0002 : "Intel Pentium-M based AMC Module", 0x0016 : "PCIe-8120 MGW Octasic DSP card ", 0x003 : "Advanced Tri-Processor Blade", 0x004 : "Advanced Tri-Processor Blade", 0x0044 : "Memory controller", 0x005 : "Real-time Processing Blade", 0x006 : "Real-time Processing Blade", 0x007 : "Processor PMC Carrier Card", 0x008 : "Up to 8 E1/T1/J1 interfaces for PMC-compatible baseboards", 0x009 : "Third Generation E1 and T1/J1 interfaces for PMC-compatible baseboards", 0x010 : "SIGTRAN Signalling Gateway Blade", 0x011 : "64 SS7 signaling channels on a single blade", 0x012 : "Portable", 0x013 : "STREAMS-Based Frame Relay Implementation", 0x014 : "Implementation of the UNIX STREAMS Environment", 0x015 : "Transparent STREAMS Interface for High Speed LAN or Shared Memory Systems", 0x7207 : "PCIe7207 Server Accelerator", }, 0x1224 : { 0x1000 : "Plum Audio", }, 0x122D : { 0x1206 : "Asus", 0x4201 : "AMR 56K modem", 0x50DC : "Audio", 0x80DA : "Audio", }, 0x122F : { 0x37AF : "Reflectometer using PLX 9030", }, 0x1236 : { 0x0000 : "RealMagic64/GX", 0x0531 : "MX98715/25", 0x3d01 : "000", 0x6401 : "REALmagic64/GX", 0x9708 : "realmagic64/gx", }, 0x123D : { 
0x0010 : "PCI-DV Digital Video Interface", }, 0x123F : { 0x00E4 : "MPEG", 0x6120 : "DVD device", 0x8120 : "i440B", 0x8888 : "cPEG C 3.0 DVD/MPEG2 Decoder", }, 0x1241 : { 0x1603 : "keyboard", }, 0x1242 : { 0x1460 : "2-Gb/s Fibre Channel-PCI 64-bit 66 MHz", 0x1560 : "Dual Channel 2 Gb/s Fibre Channel-PCI-X", 0x4643 : "JNI PCI 64-bit Fibrechannel (needs clone)", }, 0x1244 : { 0x0700 : "ISDN controller", 0x0800 : "ISDN Controller", 0x0A00 : "ISDN Controller", 0x0E00 : "Fritz!PCI 2.0 ISDN Controller", 0x1100 : "ISDN Controller", 0x1200 : "ISDN Controller", 0x2700 : "DSP TNETD5100GHK / TNETD5015", 0x2900 : "AVM Fritz!Card DSL v2.0 PCI", }, 0x124A : { 0x10BD : "Intel Gigabit network connection", 0x4023 : "Blitzz Wireless G", }, 0x124C : { 0x0220 : ".", }, 0x124D : { 0x0000 : "", 0x0002 : "", 0x0003 : "", }, 0x124F : { 0x0041 : "PCI RAID Controller", }, 0x1250 : { 0x1978 : "", 0x2898 : "", }, 0x1255 : { 0x1110 : "", 0x1210 : "", 0x2110 : "VideoPlex pci bpc1825 rev a", 0x2120 : "VideoPlex BPC 1851 A", 0x2130 : "", }, 0x1256 : { 0x4201 : "EIDE Adapter", 0x4401 : "Dale EIDE Adapter", 0x5201 : "IntelliCache SCSI Adapter", }, 0x1258 : { 0x1988 : "", }, 0x1259 : { 0x2503 : "", 0x2560 : "AT-2560 Fast Ethernet Adapter (i82557B)", 0xc107 : "", }, 0x125B : { 0x0B95 : "USB2.0 to 10/100M Fast Ethernet Controller", 0x1400 : "ASIX AX88140 Based PCI Fast Ethernet Adapter", 0x1720 : "USB2 to Fast Ethernet Adapter", }, 0x125D : { 0x0000 : "PCI Fax Modem (early model)", 0x1961 : "ESS Solo-1 Soundcard", 0x1968 : "Maestro-2 PCI audio accelerator", 0x1969 : "Solo-1 PCI AudioDrive family", 0x1978 : "ESS Maestro-2E PCI Audiodrive", 0x1980 : "subsys_0012103c_rev_12", 0x1988 : "ESS Allegro PCI Audio (WDM)", 0x1989 : "ESS Maestro 3 PCI Audio Accelerator", 0x1990 : "", 0x1992 : "", 0x1998 : "Maestro 3i", 0x1999 : "TAWE0548S", 0x199B : "Maestro-3.COMM PCI Voice+audio", 0x2808 : "PCI Fax Modem (later model)", 0x2828 : "TeleDrive", 0x2838 : "PCI Data Fax Modem", 0x2839 : "Superlink Modem/V.92 chipset 56K", 0x2898 : "TelDrive ES56T-PI family V.90 PCI modem", }, 0x125F : { 0x2084 : "AMCC Bridge + 2 x Super I/O (National PC97338)", }, 0x1260 : { 0x3860 : "PRISM 2.5 802.11b 11Mbps Wireless Controller", 0x3872 : "LAN-Express IEEE 802.11b PCI Adapter", 0x3873 : "PRISMII.5 IEE802.11g Wireless LAN", 0x3886 : "Creatix CTX405 WLAN Controller / ZyAir G100 - WLAN", 0x3890 : "PRISM GT 802.11g 54Mbps Wireless Controller", 0x8130 : "NTSC/PAL Video Decoder", 0x8131 : "NTSC/PAL Video Decoder", }, 0x1266 : { 0x0001 : "NE10/100 Adapter (i82557B)", 0x1910 : "NE2000Plus (RT8029) Ethernet Adapter", }, 0x1267 : { 0x1016 : "NICCY PCI card", 0x4243 : "Satellite receiver board / MPEG2 decoder", 0x5352 : "", 0x5A4B : "", }, 0x1268 : { 0x0204 : "Tektronix IO Processor / Tektronix PCI Acquisition Interface Rev 204", }, 0x126A : { 0x2698 : "sm bus controller", 0x269B : "SM Bus Controller", }, 0x126C : { 0x1F1F : "e-mobility 802.11b Wireless LAN PCI Card", }, 0x126F : { 0x0501 : "Mobile Multimedia Companion Chip (MMCC)", 0x0710 : "LynxEM", 0x0712 : "LynxEM+", 0x0720 : "Lynx3DM", 0x0810 : "LynxE", 0x0811 : "LynxE", 0x0820 : "Lynx3D", 0x0910 : "SILICON MOTION", 0x2260 : "PCIe SSD (NVMe/AHCI)", }, 0x1272 : { 0x0780 : "PCIVEN_8086&DEV_1C3A", 0x1272 : "PCIVEN_8086&DEV_1C3A&SUBSYS_1C3A1458", 0x1c3A : "0X78000", 0x9876 : "PCIVEN_1272&DEV_0780&SUBSYS_00000008&REV_7A3&61AAA01&0&58", }, 0x1273 : { 0x0002 : "t9p17af-01", }, 0x1274 : { 0X1005 : "Serial PCI Port", 0x1274 : "multimedia audio device", 0x1371 : "Creative AudioPCI (ES1371", 0x1373 : "Sound Blaster 
Audio(PCI)", 0x5000 : "AudioPCI", 0x5880 : "Soundblaster (CT4750)", 0x9876 : "", }, 0x1278 : { 0x0701 : "PowerPC Node", 0x1001 : "TMB17 Motherboard", }, 0x1279 : { 0x0060 : "Efficeon Virtual Northbridge", 0x0061 : "Efficeon AGP Bridge", 0x0295 : "Virtual Northbridge", 0x0395 : "Northbridge", 0x0396 : "SDRAM Controller", 0x0397 : "BIOS scratchpad", }, 0x127E : { 0x0010 : "Videum 1000 AV Plus", }, 0x1282 : { 0x1282 : "DEV", 0x9009 : "Ethernet Adapter", 0x9100 : "", 0x9102 : "10/100 Mbps Fast Ethernet Controller", }, 0x1283 : { 0x0801 : "Audio Digital Controller", 0x673A : "IDE Controller", 0x8152 : "Advanced RISC-to-PCI Companion Chip", 0x8172 : "Ultra RISC (MIPS", 0x8211 : "ATA/ATAPI Controller", 0x8212 : "ATA 133 IDE RAID Controller", 0x8213 : "IDE Controller", 0x8330 : "Host Bridge", 0x8872 : "PCI-ISA I/O chip with SMB & Parallel Port", 0x8875 : "PCI Parallel Port", 0x8888 : "PCI to ISA Bridge", 0x8889 : "sound", 0x9876 : "PCI I/O CARD", 0xE886 : "PCI to ISA Bridge", }, 0x1285 : { 0x0100 : "Maestro-1 AudioDrive", }, 0x1287 : { 0x001E : "DVD Decoder", 0x001F : "DVD Decoder", 0x0020 : "MPEG/DVD video decoder", }, 0x1289 : { 0x1006 : "1708", }, 0x128A : { 0xF001 : "controller ethernet", }, 0x128D : { 0x0021 : "ATM Adapter", }, 0x1290 : { 0x0010 : "?", }, 0x129A : { 0x0415 : "PCI 66MHz Analyzer and 33MHz Exerciser", 0x0515 : "PCI 66MHz Analyzer and Exerciser", 0x0615 : "PCI 66MHz and PCI-X 100MHz Bus Analyzer and Exerciser", 0x0715 : "PCI 66MHz and PCI-X 133MHz Bus Analyzer and Exerciser", 0xDD10 : "Digital Parallel Input Output Device 32bit", 0xDD11 : "Digital Parallel Input Output Device 64bit", 0xDD12 : "Digital Parallel Input Output Device 64bit", }, 0x12A0 : { 0x0008 : "Allen-Bradley 1784-PKTX", }, 0x12A3 : { 0xECB8 : "V.92 Lucent Modem", }, 0x12AA : { 0x5568 : "WANic 400 series X.21 controller", 0x556C : "NAI HSSI Sniffer PCI Adapter", }, 0x12AB : { 0x3000 : "PCI", }, 0x12AD : { 0x0010 : "HERMES-S0", 0x0020 : "HERMES-PRI", 0x0080 : "HERMES-PRI/PCIX", }, 0x12AE : { 0x0001 : "ACEnic 1000 BASE-SX Ethernet adapter", 0x0002 : "Copper Gigabit Ethernet Adapter", }, 0x12B9 : { 0x00c2 : "pci simple communication controller", 0x1006 : "5610 56K FaxModem WinModem", 0x1007 : "US Robotics 56K DATA FAX WINMODEM", 0x1008 : "USR5610B (0005610-02) 56K Performance Pro Modem (PCI Internal)", 0x12b9 : "pci simple communication controller", 0x3F0 : "US Robotics 56K Fax PCI aka Model 0726", }, 0x12BA : { 0x0032 : "Hammerhead-Lite-PCI", 0x0041 : "Stratix5 Family FPGA", }, 0x12C1 : { 0x9080 : "Communications Processor", }, 0x12C3 : { 0x0058 : "LAN Adapter (NE2000-compatible)", 0x5598 : "Ethernet Adapter (NE2000-compatible)", }, 0x12C4 : { 0x0001 : "", 0x0002 : "", 0x0003 : "", 0x0004 : "", 0x0005 : "BlueHeat 8 Port RS232 Serial Board", 0x0006 : "", 0x0007 : "", 0x0008 : "", 0x0009 : "", 0x000A : "", 0x000B : "", 0x000C : "", 0x000D : "", 0x000E : "", 0x000F : "", 0x0300 : "", 0x0301 : "", 0x0302 : "", 0x0303 : "", 0x0304 : "", 0x0305 : "", 0x0306 : "", 0x0307 : "", 0x0308 : "Starcom UM100 Wireless modem for WiMax ", 0x0309 : "", 0x030A : "", 0x030B : "", }, 0x12C5 : { 0x007F : "PEI Imaging Subsystem Engine", 0x0081 : "PCI Thresholding Engine", 0x0085 : "Video Simulator/Sender", 0x0086 : "Multi-scale Thresholder", }, 0x12C7 : { 0x0546 : "D120JCT-LS Card", 0x0561 : "BRI/2 Type Card (Voice Driver)", 0x0647 : "D/240JCT-T1 Card", 0x0648 : "D/300JCT-E1 Card", 0x0649 : "D/300JCT-E1 Card", 0x0651 : "MSI PCI Card", 0x0673 : "BRI/160-PCI Card", 0x0674 : "BRI/120-PCI Card", 0x0675 : "BRI/80-PCI Card", 0x0676 : "D/41JCT 
Card", 0x0685 : "D/480JCT-2T1 Card", 0x0687 : "D/600JCT-2E1 (75 Ohm) Card", 0x0689 : "Dialogic 2E1 - JCT series", 0x0707 : "D/320JCT (Resource Only) Card", 0x0708 : "D/160JCT (Resource Only) Card", }, 0x12CB : { 0x0027 : "studiocard", 0x002D : "agp", 0x002E : "", 0x002F : "", 0x0030 : "", 0x0031 : "", 0x0032 : "20-bit 2-in", 0x0033 : "", 0x0034 : "", 0x0035 : "", }, 0x12D1 : { 0x1001 : "MSM6246", 0x1003 : "173", 0x140B : "EC159", 0x1412 : "09HT1407", 0x1446 : "E1800", 0x14c5 : "K4204", 0x1506 : "E5776", 0x1520 : "-e620", 0x1802 : "unknown", 0x3609 : "N/A", }, 0x12D4 : { 0x0301 : "EP1S", }, 0x12D5 : { 0x1000 : "Broadband Signal Processor", 0x1002 : "Digital Signal Processor", }, 0x12D8 : { 0x2304 : "GENII PCI Express Packet Switch", 0x71E2 : "3 Port PCI to PCI bridge", 0x8140 : "4 Port PCI to PCI bridge", 0x8150 : "2-Port PCI to PCI Bridge", 0x8152 : "2-Port PCI-To-PCI Bridge", 0xA404 : "PCIe Packet Switch", 0xE111 : "PCI to PCIe Bridge", 0xe130 : "PCI-X Bridge", }, 0x12DB : { 0x0003 : "FoxFire II", }, 0x12DE : { 0x0200 : "Cryptoswift 200", }, 0x12DF : { 0x2102 : "Communications Controller", 0x8236 : "PCI Controller", }, 0x12E0 : { 0x0010 : "Quad UART", 0x0020 : "Quad UART", 0x0030 : "Quad UART", }, 0x12E4 : { 0x1000 : "PRI Controller", 0x1140 : "ISDN Controller", 0xB005 : "BRI Controller", 0xB006 : "BRI Controller", }, 0x12EB : { 0x0001 : "Vortex 1 Digital Audio Processor", 0x0002 : "Vortex 2 Audio Processor", 0x0003 : "Aureal Soundcard", }, 0x12EC : { 0x8139 : "0xxxx", 0x8140 : "asf", }, 0x12F2 : { 0x1002 : "Grapics Radeon X850", 0x3059 : "AC97 Enhanced Audio Controller - the 8251 controller is different", }, 0x12F8 : { 0x0002 : "s3 trio", }, 0x12FC : { 0x5cec : "IEEE 488", }, 0x1303 : { 0x0001 : "cM67 CompactPCI DSP Card", 0x0002 : "M44/cM44 DSP board", 0x0003 : "Quattro6x DSP board", 0x0004 : "Chico/ChicoPlus Data Acquisition Board", 0x0005 : "Code Hammer Jtag Debugger board", 0x0006 : "Matador DSP board", 0x0007 : "Quixote DSP board", 0x0008 : "Quadia C64x DSP", 0x0009 : "Quadia DSP Baseboard", }, 0x1304 : { 0x0004 : "PTX SAM FPGA", }, 0x1307 : { 0x0001 : "", 0x0006 : "PCI 488.2", 0x000B : "", 0x000C : "", 0x000D : "", 0x000F : "", 0x0010 : "", 0x0014 : "24 Bit Digital Input/Output Board", 0x0015 : "", 0x0016 : "", 0x0017 : "", 0x0018 : "", 0x0019 : "xkbhavu", 0x001A : "", 0x001B : "", 0x001C : "AR2425", 0x001D : "", 0x001E : "", 0x001F : "", 0x0020 : "", 0x0021 : "", 0x0022 : "", 0x0023 : "", 0x0024 : "", 0x0025 : "", 0x0026 : "", 0x0027 : "", 0x0028 : "24 Bit Digital Input/Output Board", 0x0029 : "", 0x002C : "PCI-INT32", 0x0033 : "", 0x0034 : "", 0x0035 : "65+6", 0x0036 : "Dell", 0x0037 : "", 0x004C : "", 0x004D : "", 0x0064 : "10 channels", 0x0361 : "?", }, 0x1308 : { 0x0001 : "NetCelerator Adapter", }, 0x130B : { 0x130b : "0x9876", }, 0x1310 : { 0x0003 : "CompactPCI Interface", 0x000D : "FPGA PCI Bridge", }, 0x1317 : { 0x0531 : "ADMtek AN986", 0x0981 : "FastNIC 10/100 Fast Ethernet Adapter", 0x0985 : "Linksys LNE 100TX Fast Ethernet Adapter(LNE100TX v4)", 0x1985 : "CardBus 10/100 Fast Ethernet&#1050;&#1086;&#1085;&#1090;&#1088;&#1086;&#1083;&#1083;&#1077;&#1088;", 0x2850 : "HSP56 MicroModem", 0x5120 : "ADMtek ADM5120 SOC (rev: 0)", 0x7892 : "HSP56 MicroModem", 0x8201 : "802.11b Wireless PCI Card", 0x9511 : "PCI 10/100 Fast Ethernet Adapter", 0x9513 : "PCI 10/100 Fast Ethernet Adapter", 8201 : "1317", }, 0x1318 : { 0x0911 : "G-NIC II", }, 0x1319 : { 0x0801 : "PCI Card MediaForte made in singapore (driver - ct", 0x0802 : "Xwave PCI Joystick", 0x1000 : "PCI Audio", 0x1001 : 
"Joystick", 0x1319 : "Xwave PCI audio controller", 0x4901 : "ForteMedia PCI Audio Card", 802 : "PCI Audio", }, 0x131F : { 0x2002 : "CyberSerial 16850", 0x2011 : "Siig Inc Duet 1S(16550)+1P", 0x2012 : "Duet 1S(16850)+1P", 0x2020 : "Communication controller", 0x2030 : "SIIG Cyber Serial Dual PCI Board", 0x2042 : "Trio 1S(16850)+2P", 0x2050 : "Siig Inc CyberSerial (4-port) 16550", 0x2051 : "CyberSerial 16650", 0x2052 : "CyberSerial 16850", 0x2060 : "Trio 2S(16550)+1P", 0x2061 : "Trio 2S(16650)+1P", 0x2062 : "Trio 2S(16850)+1P", 0x9876 : "Trio 2S(16550)+1P", }, 0x1328 : { 0x2048 : "", 0x8888 : "cPEG C 3.0 DVD/MPEG2 decoder", }, 0x1332 : { 0x5410 : "PCI 32bit Bulk Memory w/DMA", 0x5415 : "PCI Battery Backed SDRAM Adapter", 0x5425 : "PCI Memory Module with Battery Backup", 0x6140 : "Memory Module", }, 0x133D : { 0x1000 : "Industrial I/O Card", }, 0x1344 : { 0x3240 : "", 0x3320 : "MT8LLN21PADF", 0x3321 : "21PAD", 0x3470 : "MT7LLN22NCNE", 0x4020 : "123", 0x4030 : "", }, 0x134A : { 0x0001 : "Domex DMX 3191D PCI SCSI Controller", 0x0002 : "Domex DMX3192U/3194UP SCSI Adapter", 0x3510 : "scsi", }, 0x134D : { 0x2188 : "intel", 0x2189 : "pctel HSP56 V92 PCI Modem", 0x2486 : "V.92 MDC Modem", 0x7890 : "HSP56 MicroModem", 0x7891 : "HSP MicroModem 56", 0x7892 : "HSP56 MicroModem", 0x7893 : "HSP MicroModem 56", 0x7894 : "HSP MicroModem 56", 0x7895 : "HSP MicroModem 56", 0x7896 : "HSP MicroModem 56", 0x7897 : "HSP MicroModem 56/PCT789T", 0x9714 : "PCTEL", 0xD800 : "pctel 56k modem", 8086 : "dev", }, 0x135A : { 0x0042 : "4-port RS-232", 0x0181 : "PCI LPT and RS-232", 0x0224 : "PLX PCI Bus Logic", 0x0228 : "pq100akj9737", }, 0x135E : { 0x0EC3 : "PCIe 8 Relay Output/8 Isolated Input Board ", 0x5101 : "Route 56", 0x5102 : "RS-232 synchronous card", 0x7101 : "Single Port RS-232/422/485/520", 0x7201 : "Dual Port RS-232/422/485 Interface", 0x7202 : "Dual Port RS-232 Interface", 0x7401 : "Four Port RS-232 Interface", 0x7402 : "Four Port RS-422/485 Interface", 0x7801 : "Eight Port RS-232 Interface", 0x8001 : "Digital I/O Adapter", }, 0x1360 : { 0x0101 : "DCF77 Radio Clock", 0x0102 : "DCF77 Radio Clock", 0x0103 : "DCF77 Radio Clock", 0x0104 : "DCF77 Radio Clock", 0x0105 : "DCF77 Receiver", 0x0106 : "High Precision DCF77 Radio Clock", 0x0201 : "GPS Receiver", 0x0202 : "GPS Receiver", 0x0203 : "GPS Receiver", 0x0204 : "GPS Receiver", 0x0205 : "GPS Receiver", 0x0206 : "GPS receiver", 0x0207 : "GPS/GLONASS receiver", 0x0208 : "GPS Receiver", 0x0301 : "IRIG Timecode Reader", 0x0302 : "IRIG Timecode Reader", 0x0303 : "IRIG Timecode Reader", 0x0304 : "IRIG Timecode Receiver", 0x0305 : "IRIG Timecode Reader", 0x0306 : "IRIG Timecode Reader", 0x0501 : "PTP/IEEE1588 Slave Card", 0x0601 : "Free Running Clock", }, 0x1365 : { 0x9050 : "", }, 0x1375 : { 0x2571 : "NA", }, 0x137A : { 0x0001 : "", 0x0003 : "PCI-424 Original", 0x0004 : "PCI-424 X", 0x0005 : "PCIe-424", }, 0x1382 : { 0x0001 : "Sek'D ARC88 professional soundcard", 0x2009 : "SEK'D Prodif 96 Pro - professional audio card", 0x2048 : "Prodif Plus sound card", 0x2088 : "8-in", }, 0x1385 : { 0x4100 : "802.11b Wireless Adapter", 0x4105 : "", 0x620A : "Toshiba", 0x622A : "", 0x630A : "", 0x8169 : "Gigabit Ethernet Adapter", 0x8169 : "Fast Ethernet Adapter", 0xF311 : "Fast Ethernet Adapter", 0xF312 : "", }, 0x1387 : { 0x4640 : "sl240", 0x5310 : "SCRAMNet GT200", }, 0x1389 : { 0x0001 : "Intelligent fieldbus Adapter", 0x0104 : "PCI-CANIO adapter", }, 0x138A : { 0x0001 : "0001", 0x0005 : "0008", 0x0006 : "0006", 0x0007 : "0007", 0x0009 : "0008", 0x0011 : "11", 0x0018 : "Biometric 
Devices", 0x003C : "0086", 0x003D : "0104", 0x11 : "VFS5011", }, 0x1393 : { 0x1010 : "", 0x1020 : "", 0x1022 : "unknown", 0x1040 : "SmartIo", 0x1041 : "", 0x1042 : "", 0x1140 : "", 0x1141 : "", 0x1320 : "Industio", 0x1321 : "", 0x1340 : "UniversalPCI board", 0x1401 : "", 0x1680 : "Smartio", 0x1681 : "CP-168U Smart Serial Board", 0x2040 : "Intellio", 0x2180 : "Intellio Turbo PCI", 0x2210 : "---", 0x2240 : "---", 0x3200 : "Intellio Turbo PCI", 0x5020 : "", 0x6800 : "dvr capture card", }, 0x1394 : { 0x0001 : "Gigabit Ethernet Adapter", 0x1394 : "1394 Net adapter ", }, 0x1397 : { 0x0B4D : "ISDN HDLC FIFO Controller", 0x2BD0 : "ISDN HDLC FIFO Controller", 0x8B4D : "ISDN HDLC FIFO Controller", 0xB000 : "HCF-PCI card", 0xB006 : "HCF-PCI card", 0xB007 : "HCF-PCI card", 0xB008 : "usb webcam", 0xB009 : "HCF-PCI card", 0xB00A : "HCF-PCI card", 0xB00B : "HCF-PCI card", 0xB00C : "HCF-PCI card", 0xB100 : "HCF-PCI card", }, 0x139A : { 0x0007 : "Alacritech iSCSI Controller", }, 0x13A3 : { 0x0005 : "Security Processor", 0x0006 : "Public Key Processor", 0x0007 : "Security Processor", 0x0012 : "Security Processor", 0x0014 : "Security Processor", 0x0015 : "Security Processor", 0x0017 : "Security Processor", 0x0018 : "Security Processor", 0x001d : "Cryptographic Processor", 0x0020 : "Cryptographic Processor", 0x0026 : "Security Processor", 0x16 : "Security Processor", }, 0x13A7 : { 0x6240 : "BSRV2-301A", }, 0x13A8 : { 0x0152 : "Dual UART", 0x0154 : "Four Channel PCI Bus UART", 0x0158 : "Eight Channel PCI Bus UART (5V)", 0x0354 : "High Performance Quad PCI Express UART", 0x0358 : "High Performance Octal PCI Express UART", }, 0x13B6 : { 0x13b6 : "sguiu", }, 0x13C0 : { 0x0010 : "single port multiprotocol serial adapter", 0x0020 : "low speed single port multiprotocol serial adapter", 0x0030 : "4 port multiprotocol serial adapter", 0x0070 : "single port multiprotocol serial adapter", 0x0080 : "4 port multiprotocol serial adapter", 0x0090 : "one port asynchronous serial adapter", 0x00a0 : "2 port multiprotocol serial adapter", 0x0210 : "single port multiprotocol serial adapter", }, 0x13C1 : { 0x1000 : "ATA-RAID Controller", 0x1001 : "ATA-133 Storage Controller", 0x1002 : "SATA/PATA Storage Controller", 0x1003 : "SATA2 Raid Controller", 0x1004 : "PCI-Express SATA2 Raid Controller", 0x1005 : "PCI-Express SATA2/SAS Raid Controller", 0x1010 : "PCI-Express2 SAS2/SATA2 Raid Controller", }, 0x13C7 : { 0x0ADC : "Multi-Function Analogue/Digital IO card", 0x0B10 : "Parallel I/O Card", 0x0D10 : "Digital I/O Card", 0x5744 : "Watchdog Card", }, 0x13D0 : { 0x2103 : "B2C2 Sky2PC Core Chip sky star 2 <technisat>", 0x2200 : "", }, 0x13D1 : { 0xAB02 : "", 0xAB03 : "", 0xAB06 : "FE2000VX", 0xAB08 : "SMC8035TX", }, 0x13D7 : { 0x0205 : "toshiba", 0x8086 : "toshiba", }, 0x13D8 : { 0x1000 : "XaQti 1000Mbit/sec Gbit Ethernet Controller", }, 0x13DF : { 0x0001 : "Modem", }, 0x13EA : { 0x3131 : "BoSS Bit Synchronous HDLC Controller", 0x3134 : "Chateau Channelized T1/E1/HDLC Controller", }, 0x13F0 : { 0x0200 : "IP100A Integrated 10/100 Ethernet MAC + PHY", 0x0201 : "Fast Ehternet Adapter", 0x0300 : "Network Adapter", 0x1021 : "Tamarack 9021A Gigabit Ethernet adapter", 0x1023 : "Gigabit Ethernet Controller", 0x13F0 : "82131043", }, 0x13F1 : { 0x0028 : "MCP67 High Definition Audio", }, 0x13F6 : { 0211 : "serl", 0x0011 : "sound card", 0x0100 : "PCI", 0x0101 : "PCI Audio Device", 0x0111 : "PCI-SCCME8738LX-2", 0x0112 : "PCI Audio Chip", 0x0191 : "CMI 8738 8CH Sound Card", 0x0211 : " Driver controller pci simple comunications - PCtel HSP56 Micro 
Modem Driver ", 0x0300 : "pci audio driver", 0x111 : "C-Media Audio Controller", 0x8788 : "C-Media Oxygen HD", 0x9876 : "C-Media Audio Controller", 0x9891 : "C-Media Audio Controller", }, 0x13FD : { 0x160E : "SATA/150 &#1091;&#1089;&#1090;&#1088;&#1086;&#1081;&#1089;&#1090;&#1074;&#1072; USB 2.0", 0x161F : "s", 0x1840 : "SATA/150 device to USB 2.0 Host interface (http://www.initio.com/Html/inic-1608.html)", }, 0x13FE : { 0x1011 : "13FE", 0x1240 : "PS2134CE-0", 0x1600 : "PCI-1610CU/9-AE", 0x1680 : "PCI-1680U-A", 0x16FF : "PCI-1610CU/9-AE", 0x1713 : "PCI-1713", 0x1723 : "PCI-1723", 0x1724 : "PCI-1723", 0x1755 : "PCI-1755", 0x1760 : "amcc pci matchmaker s5920q", 0x1761 : "PCI-1751", 0x1762 : "PCI-1762", 0x1a00 : "0x03", 0x3730 : "PCM-3730I", }, 0x1400 : { 0x0001 : "", 0x0003 : "", 0x0004 : "030000", 0x1401 : "hd 2600xt", }, 0x1402 : { 0x2E00 : "Multifunction Data Aquistion card", 0x4610 : "Multi-IO board (16x 16bit ADC", 0x4650 : "Multi-IO board (16x 16bit ADC", }, 0x1407 : { 0x0100 : "Lava Dual Serial 550 PCI", 0x0101 : "Lava Quattro PCI A/B", 0x0102 : "Lava Quattro PCI C/D", 0x0110 : "Lava DSerial PCI Port A", 0x0111 : "Lava DSerial PCI Port B", 0x0180 : "Lava Octopus PCI Ports 1-4", 0x0181 : "Lava Octopus PCI Ports 5-8", 0x0200 : "LavaPort Dual-650 PCI", 0x0201 : "LavaPort Quad-650 PCI A/B", 0x0202 : "LavaPort Quad-650 PCI C/D", 0x0220 : "LavaPort Quad-650 PCI A/B", 0x0221 : "LavaPort Quad-650 PCI C/D", 0x0400 : "Lava 8255 PIO PCI", 0x0500 : "Lava Single Serial 550 PCI", 0x0510 : "Lava SP Serial 550 PCI", 0x0511 : "Lava SP BIDIR Parallel PCI", 0x0520 : "Lava RS422 SS-PCI", 0x0600 : "LavaPort 650 PCI", 0x0A00 : "COM Port Accelerator", 0x120 : "Lava Quattro 550 PCI A/B", 0x121 : "Lava Quattro 550 PCI C/D", 0x520 : "s", 0x8000 : "Lava Parallel", 0x8001 : "Lava Dual Parallel port A", 0x8002 : "Lava Dual Parallel port A", 0x8003 : "Lava Dual Parallel port B", 0x8800 : "BOCA Research IOPPAR", }, 0x1409 : { 0x1409 : "7168", 0x7168 : "1889", 0x7268 : "PCI / ISA IEEE1284 ECP/EPP/SPP/BPP Signal Chips So", 7268 : "PCI / ISA IEEE1284 ECP/EPP/SPP/BPP PAR4008A", }, 0x140B : { 0x0610 : "", 0x615 : "Na", 0x682 : "NA", }, 0x1412 : { 0x1712 : "ICE1712", 0x1724 : "VT1723", }, 0x1415 : { 0x8401 : "PCI Interface to local bus", 0x8403 : "PCI Parallel Card", 0x9500 : "Quad UART (disabled)", 0x9501 : "Quad UART", 0x9505 : "Dual UART", 0x950A : "Dual PCI UARTS", 0x950B : "Integrated High Performance UART", 0x9510 : "PCI Interface (disabled)", 0x9511 : "PCI Interface to 8-bit local bus", 0x9512 : "PCI Interface to 32-bit bus", 0x9513 : "Parallel Port", 0x9521 : "Dual UART", 0x9523 : "Integrated Parallel Port", 0xc110 : "Parallel PCI Express Card (Manhattan 158176)", 0xc158 : "Scheda PCI Express Seriale Due porte", 0xc15d : "2 native UARTs (function 1)", 0xc208 : "Quad UARTs", 0xc20d : "Quad UARTs (function 1)", 0xc308 : "Octo UARTs", 0xc30d : "Octo UARTs (function 1)", }, 0x1418 : { 0x0781 : " 802.11n Wireless Card", }, 0x141F : { 0x6181 : "MPEG decoder", }, 0x1425 : { 0x0030 : "T310 10GbE Single Port Adapter", 0x31 : "T320 10GbE Dual Port Adapter", 0x32 : "T302 1GbE Dual Port Adapter", 0x33 : "T304 1GbE Quad Port Adapter", 0x34 : "B320 10GbE Dual Port Adapter", 0x35 : "S310-CR 10GbE Single Port Adapter", 0x36 : "S320-LP-CR 10GbE Dual Port Adapter", 0x37 : "N320-G2-CR 10GbE Dual Port Adapter", 0x4401 : "T420-CR Unified Wire Ethernet Controller", 0x4402 : "T422-CR Unified Wire Ethernet Controller", 0x4403 : "T440-CR Unified Wire Ethernet Controller", 0x4404 : "T420-BCH Unified Wire Ethernet Controller", 0x4405 : 
"T440-BCH Unified Wire Ethernet Controller", 0x4406 : "T440-CH Unified Wire Ethernet Controller", 0x4407 : "T420-SO Unified Wire Ethernet Controller", 0x4408 : "T420-CX Unified Wire Ethernet Controller", 0x4409 : "T420-BT Unified Wire Ethernet Controller", 0x440a : "T404-BT Unified Wire Ethernet Controller", 0x440d : "T480 Unified Wire Ethernet Controller", 0x440e : "T440-LP-CR Unified Wire Ethernet Controller", 0x7145 : "N/A", }, 0x1435 : { 0x0531 : "DELETE", 0x6020 : "SPM6020", 0x6030 : "SPM6030", 0x6420 : "SPM186420", 0x6430 : "SPM176430", 0x7520 : "DM7520", 0x7540 : "SDM7540", }, 0x1446 : { 0x6A73 : "not known", }, 0x1448 : { 0x0001 : "Audio Editing", }, 0x144A : { 0x348A : "Low-profile High-Performance IEEE488 GPIB Interface Card for PCI Bus", 0x7230 : "PLX PCI-9052", 0x7248 : "PLX PCI9052", 0x7250 : "PLX PCI-9050", 0x7256 : "PCI-7256 16-CH Latching Relay & 16-CH Isolated Digital Input Card", 0x7296 : "96-ch digital I/O card", 0x7432 : "", 0x7433 : "64-ch digital Input card", 0x7434 : "", 0x7841 : "SJA 1000- baseddual port CAN bus card", 0x8133 : "Dell Wireless 5720 VZW Mobile Broadband Card", 0x8554 : "", 0x9111 : "", 0x9113 : "", 0x9114 : "", }, 0x144B : { 0x0601 : "", }, 0x1458 : { 0x1458 : "microsoft", 0x5000 : "GA-X48T-DQ6", 0x67B1 : "GA-X48T-DQ6", }, 0x145F : { 0x0001 : "Multi-axis Motion Controller", 0x0002 : "Multi-axis Motion Controller", }, 0x1462 : { 0x00C1 : "NX6800-TD256E", 0x4720 : "Audio controller", 0x5071 : "Audio controller", 0x5964 : "RADEON 9250/9200 series AGP", 0x7120 : "", 0x7960 : "MCP2T", }, 0x1471 : { 0x0188 : "ADSL PCI", }, 0x148C : { 0x4011 : "RADEON 9000 PRO EVIL COMMANDO", 0x4152 : "0x2079", }, 0x148D : { 0x1003 : "Creative ModemBlaster V.90 PCI DI5655", }, 0x148F : { 0x1000 : "Ralink Motorola BC4 Bluetooth 3.0+HS Adapter", 0x148f : "TP-LINK 7200ND", 0x2000 : "Ralink Motorola BC8 Bluetooth 3.0 + HS Adapter", 0x2070 : "802.11 g WLAN", 0x2573 : "802.11 bg", 0x2870 : "802.11 n WLAN", 0x3000 : "ralink rt3290_bluetooth_01", 0x3070 : "FreeWifiLink D3-10000N", 0x3572 : "Ralink 3572", 0x5370 : "802.11n USB Wireless LAN Card", 0x9021 : "Netopia USB b/g Adapter (black)", }, 0x1491 : { 0x0020 : "USB Fingerprint Scanner Model FS80", 0x0021 : "USB Fingerprint Scanner Model FS80", }, 0x14A9 : { 0xad1f : "1", }, 0x14B1 : { 0x0FEC : "Intel 82801EB (ICH5) rev. 
02", 0x1033 : "RH56D-PCI", 0x2F30 : "zyxel omni 56k CI lus rev.", }, 0x14B3 : { 0x0000 : "DSL NIC", }, 0x14B5 : { 0x0200 : "", 0x0300 : "", 0x0400 : "", 0x0600 : "", 0x0800 : "DSP-Board", 0x0900 : "DSP-Board", 0x0A00 : "DSP-Board", 0x0B00 : "DSP-Board", }, 0x14B7 : { 0x0001 : "pci9052", }, 0x14B9 : { 0x0001 : "werwerwerwe", 0x0340 : "Cisco Systems 340 PCI Wireless LAN Adptr", 0x2500 : "Wireless PCI LAN Adapter", 0x3100 : "Wireless PCI LAN Adapter", 0x3101 : "Wireless PCI LAN Adapter", 0x3500 : "Wireless PCI LAN Adapter", 0x4500 : "Wireless PCI LAN Adapter", 0x4800 : "Wireless PCI LAN Adapter", 0xA504 : "Cisco Aironet 350 Series Mini-PCI (MPI350)", 0xA506 : "802.11b/g wireless adapter", }, 0x14C1 : { 0x8043 : "MyriNet", }, 0x14C8 : { 0x0003 : "0", }, 0x14CD : { 0x03 : "0x1212", 0x1001 : "802.11 ac wireless module", 0x168a : "Multi-Card reader", }, 0x14CF : { 0x2920 : "Serial I/O Controller aka FPMC-DFLEX64", }, 0x14D4 : { 0x0400 : "Interface chip", }, 0x14D9 : { 0x0010 : "Sturgeon HyperTransport-PCI Bridge", }, 0x14DB : { 0x2100 : "download drivers", 0x2101 : "", 0x2102 : "", 0x2110 : "OX16PCI952", 0x2111 : "", 0x2112 : "", 0x2120 : "0701 Parallel Port device", 0x2121 : "Avlab Technology PCI IO 2P", 0x2130 : "2 Port PCI Serial Card", 0x2131 : "pci serial port", 0x2132 : "", 0x2140 : "", 0x2141 : "", 0x2142 : "", 0x2144 : "", 0x2145 : "", 0x2146 : "", 0x2150 : "", 0x2151 : "", 0x2152 : "", 0x2160 : "", 0x2161 : "", 0x2162 : "", 0x2180 : "VEN_14DB&DEV_2180&SUBSYS_218014DB&REV_00", 0x2181 : "Avlab Technology Inc", 0x2182 : "Avlab Technology Inc", }, 0x14DC : { 0x0000 : "", 0x0001 : "4-port high speed RS-232", 0x0002 : "8-port high speed RS-232", 0x0003 : "2-port high speed RS-232", 0x0004 : "2-port high speed RS-422/485", 0x0005 : "2-port high speed RS-232 and RS-422/485", 0x0006 : "16-channel analog input (with timers)", 0x0007 : "16-chan 12-bit analog output (w/ timers)", 0x0008 : "4-chan 16-bit analog output (w/ timers)", 0x0009 : "24-channel digital I/O", 0x000A : "72-channel digital I/O", 0x000B : "48-channel digital I/O (w/ 6 timers)", 0x000C : "16-channel reed relay output", }, 0x14E4 : { 0x0038 : "100G packet processor ", 0x0102 : "Intel (R)", 0x0318 : "n/a", 0x034F : "???", 0x04B5 : "Broadcom 54bg Wireless", 0x0732 : "2x40G/8x10G MAC Aggregation Switch with 80G Uplink", 0x0800 : "Sentry5 Chipcommon I/O Controller", 0x0804 : "Sentry5 PCI Bridge", 0x0805 : "Sentry5 MIPS32 CPU", 0x0806 : "Sentry5 Ethernet Controller", 0x080B : "Sentry5 Crypto Accelerator", 0x080F : "Sentry5 DDR/SDR RAM Controller", 0x0811 : "Sentry5 External Interface", 0x0816 : "Sentry5 MIPS32 CPU", 0x1234 : "networkcontroller", 0x1361 : "Ethernet", 0x14E4 : "802.11b/g Wireless Lan Controller", 0x1570 : "Webcam found on Macbook Pro with Retina Display", 0x1600 : "NetXtreme BCM5752 Gigabit Ethernet PCI Express", 0x1601 : "NetXtreme Desktop/Mobile", 0x1610 : "Broadcom BCN70010 Video Decoder", 0x1612 : "Crystal HD Video Decoder", 0x1615 : "Broadcom Crystal HD Video Decoder", 0x161F : "AVC/VC-1/MPEG PCI Express HD Decoder Chipset for Netbooks/Nettops", 0x1639 : "NetXtreme Gigabit Ethernet II", 0x163B : "Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver", 0x1644 : "ven_1102dev_0004", 0x1645 : "broadtcomBCM5701 Gigabit EthernetASD", 0x1646 : "NetXtreme Gigabit Ethernet", 0x1647 : "NetLink tm Gigabit Ethernet pcie", 0x1648 : "NetXtreme Dual Gigabit Adapter", 0x164C : "Broadcom NetXtreme II Gigabit Ethernet Adapter", 0x164D : "NetXtreme Fast Ethernet Controller", 0x1650 : "Broadcom PCIe 10Gb Network Controller ", 0x1653 : 
"Broadcom NetXtreme Gigabit Ethernet", 0x1654 : "NetXtreme Gigabit Ethernet", 0x1658 : "NtXtreme Gigabit Ethernet", 0x1659 : "NetXtreme Gigabit Ethernet PCI Express", 0x165A : "Broadcom NetXtreme BCM5722 Gigabit", 0x165D : "Broadcom NetXtreme Gigabit Ethernet", 0x165E : "NetXtreme Gigabit Ethernet", 0x165F : "Broadcom NetXtreme 5720 Gigabit Ethernet", 0x166a : "Broadcom NetXtreme Gigabit Ethernet 5780", 0x166B : "NetXtreme Gigabit Ethernet", 0x166D : "NetXtreme Ethernet 100kB", 0x166E : "NetXtreme Gigabit Ethernet", 0x167 : "NetXtreme Fast Ethernet Controller", 0x1672 : "NetXtreme Gigabit Ethernet", 0x1673 : "NetXtreme Gigabit Ethernet", 0x1674 : "57XX Series Broadcom Driver X86/X64", 0x1676 : "NetXtreme Gigabit Ethernet", 0x1677 : "NetXtreme Desktop/Mobile", 0x1677 : "Broadcom Netxtreme Gigabit Ethernet", 0x167A : "Broadcom NetXtreme Gigabit Ethernet Controller", 0x167B : "NetXtreme Gigabit Ethernet", 0x167C : "NetXtreme Gigabit Ethernet", 0x167d : "Broadcom NetXtreme Gigabit Ethernet", 0x167E : "vierkant", 0x1680 : "NetXtreme Desktop/Mobile", 0x1681 : "Broadcom 57XX Gigabit Integrated Controller ", 0x1684 : "Broadcom NetXtreme Gigabit Ethernet", 0x1690 : "NexTreme Desktop/Mobile", 0x1691 : "Broadcom BCM57788 LOM ", 0x1691 : "Broadcom NetLink (TM) Gigabit Ethernet", 0x1692 : "NetLink", 0x1693 : "Ethernet Controller Broadcom Netlink Gigabit", 0x1696 : "Broadcom NetXtreme Gigabit Ethernet ", 0x1698 : "NetLink Ethernet-FOR DELL LAPTOP AND MAYBE OTHERS", 0x169A : "Broadcom Netlink (TM) gigabit ethernet Driver", 0x169B : "NetXtreme Gigabit Ethernet", 0x169C : "Broadcom NetLink (TM) Gigabit Ethernet", 0x169D : " BCM5789", 0x169E : "NetXtreme Gigabit Ethernet PCI Express", 0x16A6 : "Gigabit Ethernet", 0x16A7 : "Gigabit Ethernet", 0x16A8 : "NetXtreme Gigabit Ethernet", 0x16AA : "BroadCom NetExtreme II Server", 0x16B1 : "BCM57781", 0x16B5 : "Broadcom NetLink Gigabit Ethernet", 0x16BE : "CardReader Broadcom 1.0.0.221", 0x16BF : "CardReader Broadcom 15.0.7.2", 0x16C6 : "NetXtreme Gigabit Ethernet", 0x16C7 : "DELL Wireless 1390 WLAN MiniCard", 0x16DD : "NetXtreme Gigabit Ethernet", 0x16f7 : "NetXtreme BCM5753 Gigabit PCI Express", 0x16FD : "NetXtreme Gigabit Ethernet PciXpress", 0x16FE : "NetXtreme Gigabit Ethernet", 0x170C : "Broadcom 440x 10/100 Integrated Controller", 0x170D : "NetXtreme", 0x170E : "NetXtreme 100Base-TX", 0x1713 : "Broadcom NetLink (TM) Fast Ethernet", 0x21E3 : "Broadcom Bluetooth 4.0", 0x333 : "16p 1G (PHY)", 0x3352 : "BCM3352 QAMLink Single-Chip 4-Line VoIP", 0x3360 : "Advanced PHY Broadband Gateway Cable Modem", 0x4211 : "10Mb/s NIC", 0x4212 : "56k Modem", 0x4301 : "Dell Truemobile 1180 802.11g MiniPCI", 0x4303 : "BCM4301 802.11b802.11b Wireless LAN Controller", 0x4305 : "V.90 56k Modem", 0x4306 : "Unknown device 4306 (rev 02)", 0x4307 : "802.11b Wireless LAN Controller", 0x4310 : "BCM4301USB Controller", 0x4311 : "802.11b/g Wireless LAN", 0x4312 : "broadcom wireless 1490 (dell)", 0x4313 : "wireless network card", 0x4315 : "Broadcom Wireless b/g (Tested Drivers)", 0x4318 : "Broadcom 802.11b/g WLAN", 0x4320 : "802.11B/G Wireless Lan Controller 3-&#1103; &#1056;&#1077;&#1076;&#1072;&#1082;&#1094;&#1080;&#1103;", 0x4321 : "802.11a Wireless LAN Controller", 0x4322 : "UART", 0x4323 : "V.90 56k Modem", 0x4324 : "802.11a/b/g Wireless LAN", 0x4325 : "802.11b/g Wireless LAN Controller", 0x4326 : "Chipcommon I/O Controller?", 0x4328 : "Broadcom BCM43xx 1.0 (5.10.91.27)", 0x4329 : "Broadcom 802.11n Network Adapter", 0x432B : "Broadcom Wireless LAN Driver ", 0x4331 : "Broadcom BCM4331", 
0x4353 : "Broadcom Half Mini PCI Express Wifi card / DL1520 (aka Dell Wireless 1520 802.11n Mini Card WLAN Dri", 0x4357 : "Broadcom WiFi 802.11b/g/n", 0x4358 : "Broadcom 802.11n WLAN module", 0x4359 : "Half-mini wireless-N card DW1530", 0x4365 : "Broadcom 43142 Wireless LAN Adapter", 0x43a1 : "Broadcom BCM4708A0", 0x4401 : "10/100 Integrated Ethernet Controller", 0x4402 : "10/100 Integrated Ethernet Controller", 0x4403 : "V.90 56k Modem", 0x4410 : "iLine32 HomePNA 2.0", 0x4411 : "V.90 56k Modem", 0x4412 : "10/100BaseT Ethernet", 0x4430 : "CardBus iLine32 HomePNA 2.0", 0x4432 : "CardBus 10/100BaseT Ethernet", 0x4610 : "Sentry5 PCI to SB Bridge", 0x4611 : "Sentry5 iLine32 HomePNA 1.0", 0x4612 : "Sentry5 V.90 56k Modem", 0x4613 : "Sentry5 Ethernet Controller", 0x4614 : "Sentry5 External Interface", 0x4615 : "Sentry5 USB Controller", 0x4704 : "Sentry5 PCI to SB Bridge", 0x4708 : "Crypto Accelerator", 0x4710 : "Sentry5 PCI to SB Bridge", 0x4711 : "Sentry5 iLine32 HomePNA 2.0", 0x4712 : "Sentry5 V.92 56k modem", 0x4713 : "Sentry5 Ethernet Controller", 0x4714 : "Sentry5 External Interface", 0x4715 : "Sentry5 USB Controller", 0x4716 : "Sentry5 USB Host Controller", 0x4717 : "Sentry5 USB Device Controller", 0x4718 : "Sentry5 Crypto Accelerator", 0x4720 : "MIPS CPU", 0x4726 : "01", 0x4727 : "Dell Wireless 1501/1503/1701 Half Mini Card Driver (used google chrome to download file)", 0x4728 : "01", 0x5334 : "16P 1G (PHY)", 0x5365 : "Sentry5 PCI to SB Bridge", 0x5600 : "StrataSwitch 24+2 Ethernet Switch Controller", 0x5605 : "StrataSwitch 24+2 Ethernet Switch Controller", 0x5615 : "StrataSwitch 24+2 Ethernet Switch Controller", 0x5625 : "StrataSwitch 24+2 Ethernet Switch Controller", 0x5645 : "StrataSwitch 24+2 Ethernet Switch Controller", 0x5670 : "8-Port 10GE Ethernet Switch Fabric", 0x5680 : "G-Switch 8-Port Gigabit Ethernet Switch Controller", 0x5690 : "12-port Multi-Layer Gigabit Ethernet Switch", 0x5691 : "GE/10GE 8+2 Gigabit Ethernet Switch Controller", 0x5802 : "The BCM5802 Security Processor integrates Broadcoms IPSec engine (DES", 0x5805 : "The BCM5805 Security Processor integrates a high-performance IPSec engine (DES", 0x5820 : "Crypto Accelerator", 0x5821 : "Crypto Accelerator", 0x5822 : "Crypto Accelerator", 0x5823 : "Crypto Accelerator", 0x5824 : "Crypto Accelerator", 0x5825 : "BCM5825", 0x5840 : "Crypto Accelerator", 0x5841 : "Crypto Accelerator", 0x5850 : "Crypto Accelerator", 0x7321 : "network card integrated", 0x7411 : "High Definition Video/Audio Decoder", 0x7865 : "Wireless-N WLAN", 0x8010 : "Next generation router SOC with gigabit switch", 0x8011 : "Next generation router SOC with gigabit switch", 0x8012 : "Next generation router SOC with gigabit switch", 0x8016 : "Next generation router SOC with gigabit switch with RGMII/SDIO", 0x8018 : "Next generation router SOC with gigabit switch with RGMII/SDIO", 0x8019 : "Next generation router SOC with gigabit switch without RGMII/SDIO", 0x8022 : "Next generation router SOC with gigabit switch with RGMII/SDIO", 0x8023 : "Next generation router SOC with gigabit switch with SATA instead of RGMII/SDIO", 0x8025 : "Next generation router SOC with gigabit switch with RGMII/SDIO", 0x8202 : "Packet Processor ASIC", 0x8334 : "24 1G", 0x8342 : "8 1G (PHY)", 0x8344 : " 24P 1G +4P 1G (PHY)", 0x8346 : "24P 1G +4P 1G/10G (PHY)", 0x8393 : "14P (1G", 0x8394 : "10P 1G + 4x1/2.5/5/10G (no PHY) ", 0x8401 : "160Gbps L2+ Ethernet Switch", 0x8402 : "160Gbps L2+ Ethernet Switch", 0x8405 : "160Gbps L2+ Ethernet Switch", 0x8406 : "160Gbps L2+ Ethernet Switch", 
0x8408 : "160Gbps L2+ Ethernet Switch", 0x8411 : "160Gbps L2+ Ethernet Switch", 0x8412 : "160Gbps L2+ Ethernet Switch", 0x8415 : "160Gbps L2+ Ethernet Switch", 0x8416 : "160Gbps L2+ Ethernet Switch", 0x8418 : "160Gbps L2+ Ethernet Switch", 0x8433 : "L2+ Ethernet switch: 16P 1G", 0x8434 : "L2+ Ethernet switch: 24P 1G", 0x8442 : "L2+ Ethernet switch: 8P 1G + 4P 1G", 0x8443 : "L2+ Ethernet switch: 16P 1G + 4P 1G", 0x8444 : "L2+ Ethernet switch: 24P 1G + 4P 1G", 0x8446 : "L2+ Ethernet switch: 24P 1G +2P 1G/10G +2P 1G/10G", 0x8447 : "L2+ Ethernet switch: 24P 1G +2P 1G/10G +2P 1G/10G", 0x8448 : "L2+ Ethernet switch: 8P 1G + 8P 1G/2.5G + 4P 10G", 0x8449 : "L2+ Ethernet switch: 16P 1G + 8P 1G/2.5G + 4P 10G + 2P 20G", 0x9867 : "900000000", 0x9876 : "0x14E4", 0xA8D6 : "Broadcom 802.11n WLAN chip", 0xB061 : "160Gbps L2+ Ethernet Switch", 0xB062 : "160Gbps L2+ Ethernet Switch", 0xB063 : "160Gbps L2+ Ethernet Switch", 0xB064 : "160Gbps L2+ Ethernet Switch", 0xB150 : "Hurricane2 (Lightly Managed) 24P 1G +4P 1G/10G (PHY)", 0xb152 : "24P 1G (PHY)", 0xB160 : "L2+ switch: 24P 1G +2P 1G/10G +2P 1G/10G", 0xB161 : "L2+ Ethernet switch: 24P 1G +2P 1G/10G +2P 1G/10G", 0xB162 : "L2+ Ethernet switch: 24P 1G +4P 1G", 0xB340 : "48-port multi-layer switch with embedded CPU", 0xB450 : "100G Multi-layer Ethernet Switch", 0xB640 : "260Gbps Extensible Switch with 100GE", 0xB842 : "320Gbps Ethernet Multilayer Switch", 0xB845 : "640G Multi-layer Ethernet Switch", 0xB850 : "1.28T I/O Multi-layer Ethernet Switch", 0xB960 : "3.2T I/O Multi-layer Ethernet Switch", }, 0x14EA : { 0xAB06 : "XFNW-3603-T", }, 0x14EB : { 0x0020 : "PCI to S5U13xxxB00B Bridge Adapter", 0x0C01 : "Embedded Memory Display Controller", }, 0x14EC : { 0x16BE : "1.0.0.222_W7x86_A", }, 0x14F1 : { 0x0F00 : "HSF Generic Modem", 0x0F30 : "0x14F1", 0x1031 : "dfd", 0x1033 : "RH56D-PCI", 0x1033 : "RH56D-PCI", 0x1035 : "RH56D/SP-PCI", 0x1036 : "", 0x1056 : "subsys", 0x1059 : " DI5631", 0x10B4 : "All Conextant HFC Modems (PCI)", 0x10B6 : "Conexant HCF PCI Soft modem", 0x1416 : "Contexant", 0x1456 : "HCFp Modem", 0x14F1 : "0x14F1", 0x1611 : "AccessRunner ADSL Modem", 0x1612 : "8", 0x2013 : "in-Build CX11235 modem", 0x2400 : "unknown", 0x2702 : "HSFi or Soft V92 PCI Modem", 0x2710 : "PCI modem card Conexant", 0x2740 : "Pci Simple Communtications Controller", 0x2B10 : "0x14F1", 0x2BFA : "Conexant HDA D110 MDC v.92 Modem", 0x2C06 : "Conexant HD Audio SoftV92 Data Fax Modem with SmartCP", 0x2F00 : "HSF 56k HSFi Modem", 0x2f01 : "missing driver for pci simple communications controller", 0x2F10 : "CXT / USR 56K Fax Host int", 0x2F20 : "SoftV92 Data Fax Modem with SmartCP", 0x2F30 : "PCI SoftV92 Modem", 0x2F30 : "hp/compaq alhena 5-gl6", 0x2F40 : "PCI Soft Data Fax Modem with SmartCP", 0x2F50 : "SmartLink 2801", 0x2F52 : "013&61AAA01&0&50", 0x2F81 : " ", 0x2F82 : "Conexant PCI-E Soft Data/Fax Modem with SmartCP", 0x5045 : "http://h10025.www1.hp.com/ewfrf/wc/softwareDownloadIndex?softwareitem=ob-43284-1&lc=en&dlc=en&cc=us&", 0x5045 : "Conextant High Definition", 0x5045 : "Conextant High Definition SmartAudio 221", 0x5045 : "Conextant High Definition Audio-Venice 5051", 0x5047 : "HDAUDIO Soft Data Fax Modm- Conexant Sound Card Audio Driver", 0x5051 : "Conexant HD-Audio SmartAudio 221", 0x5051 : "Conexant HD-Audio SmartAudio 221", 0x5051 : "Conexant HD-Audio SmartAudio 221", 0x5066 : "HDAUDIOFunc_01&VEN_14F1&DEV_5069&SUBSYS_17AA214C&REV_1003", 0x5069 : "conexant 20585 smartAudio HD", 0x506C : "Conexant High Definition Audio", 0x506E : "Conexant 20672 SmartAudio HD", 0x50A1 : 
"Conexant HD Audio", 0x50A2 : "Conexant HD Audio", 0x5B7A : "Single-Chip MPEG-2 Encoder with Integrated Analog Video/Broadcast Audio Decoder", 0x8800 : "PAL audio/video decoder", 0x8800 : "0x14F1", 0x8801 : "PCI Broadcast Audio/Video Decoder", 0x8802 : "MPEG Encoder ", 0x8811 : "Audio Capture ike", 0x8852 : "Leadtek Winfast PxDVR3200 H (XC3028)", 0x8880 : "PCI Express Video and Broadcast Audio Decoder", 0x9876 : "Communication controller", 0x27d8 : "INTEL IDT Audio", }, 0x14F2 : { 0x0001 : "", 0x0002 : "", 0x0120 : "win7_rtm.090713-1257", 0x0121 : "", 0x0122 : "unknown", 0x0123 : "6.1.7600.16385", 0x0124 : "3103", }, 0x14F5 : { 0x2F00 : "x", }, 0x14FD : { 0x0001 : "H260u printer server for HP Printer", }, 0x1507 : { 0x0001 : "", 0x0002 : "", 0x0003 : "", 0x0100 : "", 0x0431 : "", 0x4801 : "", 0x4802 : "", 0x4803 : "", 0x4806 : "", }, 0x1516 : { 0x0800 : "PCI Ethernet controller", 0x0803 : "PCI Ethernet controller", 0x0891 : "PCI Ethernet controller", }, 0x1519 : { 0x0020 : "HSIC Device", 0x2004 : "PCI Interface bus", }, 0x151A : { 0x1002 : "4341", 0x1004 : "", 0x1008 : "", }, 0x151B : { 0x9080 : "combox cb 300a", }, 0x151D : { 0x9876 : "?", }, 0x151F : { 0x0001 : "TOPIC FM-56PCI-TP", 0x0568 : "56k Internal Data Fax Voice Modem", }, 0x1522 : { 0x0100 : "PCI Interface Chip", }, 0x1523 : { 0x8 : "Content Addressable Memory", }, 0x1524 : { 0x0751 : "pci", 0x0100 : "ENE CIR Receiver ", 0x0510 : "PCI Memory Card Reader Controller", 0x0530 : "Memory Stick Card Reader", 0x0550 : "Secure Digital Card Reader", 0x0551 : "ven1524&dev_0551&SUBSYS_009F1025&REV_01", 0x0555 : "ven1524&dev_0551&SUBSYS_009F1025&REV_01", 0x0610 : "PCI Smart Card Reader Controller", 0x0730 : "CardBus Controller", 0x100 : "ENE CIR Receiver", 0x1025 : "PCIVEN_127a&DEV_1025&SUBSYS_1025123A&REV_014&1351887D&0&58F0", 0x1211 : "CardBus Controller", 0x1225 : "CardBus Controller", 0x1410 : "CardBus Controller", 0x1411 : "pci device", 0x1412 : "Cardbus Controller", 0x1420 : "CardBus Controller", 0x1421 : "CardBus Controller", 0x1422 : "CardBus Controller", 0x510 : "PCI Memory Card Reader Controller", 0x551 : "ven1524&dev_0551&SUBSYS_009F1025&REV_01", 0x9876 : "1941", 0xFC10 : "pci device", }, 0x152D : { 0x2329 : "J micron JM20329", 0x2519 : "JMicron Technology Corp. 
}, 0x152E : { 0x2507 : "0", }, 0x1538 : { 0x0301 : "Tekram DC200 PATA100 RAID Controller", 0x0303 : "ARS0304S PATA133 RAID5", }, 0x153B : { 0x1115 : "IC Ensemble Inc ICE1712 Envy24 Multichannel Audio Controller", 0x1143 : "Philips Semiconductors SAA7134HL Multimedia Capture Device", 0x6003 : "CrystalClear SoundFusion PCI Audio Accel", }, 0x153F : { 0xdead : "xx12345", }, 0x1540 : { 0x9524 : "PAL/SECAM TV card w/ FM1216ME MK3 tuner (+FM radio)", }, 0x1543 : { 0x1052 : "Modem Intel 537EP (Chipset KAIOMY)", 0x3052 : "Modem Intel 537EP (Chipset KAIOMY)", 0x3155 : "ModemDeviceonHighDefinitionAudioBus", }, 0x1549 : { 0x80FF : "PCI/ISA Bus Bridge", }, 0x154A : { 0x9016 : "USB DVB-T Device AF9015", 0x9876 : "USB DVB-T Device CE950081", }, 0x154B : { 0x3038 : "USB", }, 0x1555 : { 0x0002 : "Easylon PCI Bus Interface", }, 0x1556 : { 0x5555 : "an cpci application", }, 0x1558 : { 0x1558 : "gtx 670mx GPU", }, 0x155E : { 0x0020 : "Multi Function Card Version 3", }, 0x1562 : { 0x0001 : "LA-41x3", 0x0002 : "LA-5030", 0x0003 : "LA-5033", }, 0x156A : { 0x5000 : "Wideband Advanced Signal Processor", 0x5100 : "High Data Rate Radio", }, 0x1571 : { 0xA001 : "GHB", 0xA002 : "ARCnet", 0xA003 : "ARCnet", 0xA004 : "ARCnet", 0xA005 : "ARCnet", 0xA006 : "ARCnet", 0xA007 : "ARCnet", 0xA008 : "SONY", 0xA009 : "5 Mbit ARCnet", 0xA00A : "5 Mbit ARCnet", 0xA00B : "5 Mbit ARCnet", 0xA00C : "5 Mbit ARCnet", 0xA00D : "5 Mbit ARCnet", 0xA00E : "ARCNET", 0xA201 : "10 Mbit ARCnet", 0xA202 : "10 Mbit ARCnet", 0xA203 : "10 Mbit ARCnet", 0xA204 : "10 Mbit ARCnet", 0xA205 : "10 Mbit ARCnet", 0xA206 : "10 Mbit ARCnet", }, 0x157C : { 0x8001 : "PCI Y2K Compliance Card", }, 0x1584 : { 0x5054 : "VAS Vetronix Automotive Service", }, 0x1586 : { 0x0803 : "", }, 0x1588 : { 0x1100 : "PAX.ware 1100 dual Gb classifier engine", 0x2000 : "SNP 8023 packet classifier - AMD component", 0x8023 : "PAX.ware 100 packet classifier", }, 0x158B : { 0x0005 : "Standard HSP Modem", 0x0015 : "Standard HSP Modem Series", }, 0x1592 : { 0x0781 : "Multi-IO Card", 0x0782 : "Parallel Port Card (EPP)", 0x0783 : "Multi-IO Card", 0x0785 : "Multi-IO Card", 0x0786 : "Multi-IO Card", 0x0787 : "Multi-IO Card 2 series", 0x0788 : "Multi-IO Card", 0x078A : "Multi-IO Card", }, 0x15A2 : { 0x0001 : "PCI Bus Analyzer/Exerciser", }, 0x15AD : { 0x0405 : "VMWARE SVGA II", 0x0710 : "Virtual SVGA", 0x0720 : "VMware PCI Ethernet Adapter", 0x0740 : "VMware VMCI Bus Device", 0x0770 : "Standard Enhanced PCI to USB Host Controller", 0x0778 : "Sabrent USB-to-Parallel Adapter", 0x07B0 : "VMware vSphere 4 PCI Ethernet Adapter", 0x0801 : "PCI Memory Controller", 0x1975 : "High Definition Audio Codec", 0x1977 : "High Definition Audio Controller", }, 0x15B0 : { 0x0001 : "Pctel", 0x0003 : "Pctel", 0x2BD0 : "soft56k voice", }, 0x15B3 : { 0x5274 : "InfiniBridge", 0x5A44 : "InfiniHost I", 0x6274 : "InfiniHost III Lx", 0x6278 : "InfiniHost TM III Ex", 0x6282 : "MT25218 [InfiniHost III Ex]", 0x634A : "Mellanox ConnectX VPI (MT2548) - PCIe 2.0 2.5GT/s", 0x6732 : "ConnectX VPI (MT26418) - PCIe 2.0 5GT/s", }, 0x15B8 : { 0x3009 : "Analog output board", }, 0x15BC : { 0x0101 : "DX2+ FC-AL Adapter", 0x0103 : "4 Port Fibre Channel Controller", 0x0B01 : "Agilent PCI-GPIB", 0x1200 : "Agilent QX4 Fibre Channel Controller", 0x2530 : "HP Communications Port", 0x2531 : "HP Toptools Remote Control Adapter", 0x2532 : "HP Toptools Remote Control Adapter", 0x2929 : "PCI/PCI-X Bus Analyzer", }, 0x15C2 : { 0x0038 : "part of the iMon-IR-RC-Display-Kit",
iMon-IR-RC-Display-Kit", }, 0x15D1 : { 0x0001 : "TriCore 32-bit Single-chip Microctrlr", 0x0003 : "6 Port Optimized Comm Ctrlr (SPOCC)", 0x0004 : "Infineon Technologies AG", 0x000B : "TPM", }, 0x15D8 : { 0x9001 : "", }, 0x15D9 : { 0x9876 : "4567", }, 0x15DC : { 0x0001 : "PCI Cryptography Module", }, 0x15DD : { 0x7664 : "idt high audio", 0x7680 : "SIGMATEL STAC 92XX C-Major HD Audio", 0x769 : "9200 HD ", 0x7690 : "You'll Love me 4 this/ visit http://wendhelofopportunity.info Support Me!", 0x8384 : "Intel Audio Studio", 0x9876 : "1", }, 0x15E0 : { 0x7134 : "01", }, 0x15E2 : { 0x0500 : "Internet PhoneJack PCI Card", }, 0x15E6 : { 0x0000 : "v.90 Lucent Modem", }, 0x15E7 : { 0x755 : "NTDS Parallel Adapter", }, 0x15E8 : { 0x0130 : "Wireless NIC", 0x0131 : "InstantWave HR PCI card", }, 0x15E9 : { 0x1841 : "ATA controller", }, 0x15EF : { 0x0028 : "SigmaTelHigh Definition Audio CODEC", 0x24c5 : "VIA-Vynil v700b", 0x7616 : "SigmaTelHigh Definition Audio CODEC", }, 0x15F1 : { 0x2F30 : "Conexant HSFi", }, 0x15F2 : { 0x0001 : "Spot RT", 0x0002 : "Spot RT #2", 0x0003 : "Spot Insight", }, 0x160A : { 0x3184 : "Via VT6656 Wireless Lan Adapter", }, 0x1616 : { 0x0409 : "16-Bit", }, 0x1619 : { 0x0400 : "Two Port Intelligent Sync Comms Card", 0x0440 : "Four Port Intelligent Sync Comms Card", 0x0610 : "One Port Intelligent Sync Comms Card", 0x0620 : "Two Port Intelligent Sync Comms Card", 0x0640 : "Four Port Intelligent Sync Comms Card", 0x1610 : "One Port Intelligent Sync Comms Card", 0x1612 : "One Port Intelligent Sync Comms Card", 0x2610 : "G.SHDSL Intelligent Sync Comms Card", 0x3640 : "Four Port Intelligent Sync Comms Card", 0x4620 : "Two Port Intelligent Sync Comms Card", 0x4640 : "Four Port Intelligent Sync Comms Card", 0x5621 : "Two Port Intelligent Sync Comms Card", 0x5641 : "Four Port Intelligent Sync Comms Card", 0x6620 : "Two Port Intelligent Sync Comms Card", }, 0x1621 : { 0x0020 : "4 in/4 out Professional Digital Audio Card", 0x0021 : "2 in/6 out Professional Digital Audio Card", 0x0022 : "6 in/2 out Professional Digital Audio Card", 0x0023 : "2 in/2 out Professional Digital Audio Card", 0x0024 : "16 in/16 out AES/EBU Audio Card", 0x0025 : "16 in/16 out AES/EBU Audio Card w/SRC", }, 0x1629 : { 0x1003 : "Format Synchronizer v3.0", 0x2002 : "Fast Universal Data Output", }, 0x162D : { 0x0100 : "Repeographics controller", 0x0101 : "Reprographics Controller", 0x0102 : "Reprographics Controller", 0x0103 : "Reprographics Controller", }, 0x162F : { 0x1111 : "General Purpose Relay Card", 0x1112 : "Matrix Card", }, 0x1638 : { 0x1100 : " WL11000P", }, 0x163B : { 0x2416 : "DVR Video Capture Card 16CH", }, 0x163C : { 0x3052 : "RS56/HSP-PCI", 0xFF02 : "PCI Bridge - 244E", }, 0x164F : { 0x0001 : "PCI interface chip", 0x0002 : "PCI interaface chip", }, 0x1657 : { 0x0646 : "Brocade 400 series PCIe HBA", }, 0x1658 : { 0x0704 : "DIG 704 PCI - Interface with Millisecond Timer and Interrupts", }, 0x165A : { 0xC100 : "PCI camera link video capture board", 0xD200 : "PCI digital video capture board", 0xD300 : "PCI digital video capture board", 0xF001 : "PCI-E camera link video capture board", }, 0x165C : { 0x0002 : "FT232BL", }, 0x165F : { 0x2000 : "16 Channel Audio Capture Card", }, 0x1668 : { 0x0100 : "PCI to PCI Bridge", }, 0x166D : { 0x0001 : "", 0x0002 : "MIPS BCM1125/BCM1250 processors", }, 0x1676 : { 0x1001 : "Realtek AC' 97 Audio Driver", }, 0x1677 : { 0x20ad : "Profibus DP / K-Feldbus / COM", }, 0x167F : { 0x4634 : "FOB-IO Card", 0x4C32 : "L2B PCI Board", 0x5344 : "FOB-SD Card", 0x5443 : "FOB-TDC Card", 0xF0B1 : 
"ibaFOB-io-D", 0xF0B2 : "ibaFOB-2io-D", 0xF0B4 : "ibaFOB-4io-D", 0xF1B2 : "ibaFOB-2i-D", 0xF5DE : "ibaFOB-SDexp", 0xFDCE : "ibaFOB-TDCexp", 0xFEC1 : "ibaFOB-io-ExpressCard", }, 0x1681 : { 0x0050 : "Hercules WiFi PCI 802.11G", }, 0x1682 : { 0x2931 : "unknown", 0x9875 : "779A", }, 0x1688 : { 0x0013 : "", }, 0x168C : { 0x001c : "pciven_10ac&dev_ooo", 0x0002 : "Atheros AR5B95 Wireless LAN 802.11 a/b/g/n Controller", 0x0003 : "TP-LINK 450Mbps Wireless N Adapter", 0x0007 : "ROOTMS_SSTPMINIPORT0000", 0x0011 : "11L/b/g Wireless LAN Mini PCI Adapter", 0x0012 : " PCIVEN_1217&DEV_7130&SUBSYS_FF501179&REV_01 DELL Latitude C510 as mini-PCI board behind the larg", 0x0013 : "Netgear RangeMax WPN311 PCI Wireless NIC", 0x0019 : "802.11a Wireless Adapter", 0x001A : "http://support1.toshiba-tro.de/tools/updates/atheros-wlan/atheros-wlan-xp-7702331.zip", 0x001B : "802.11abg NIC", 0x001c : "Atheros AR5007EG Wireless Network Adapter", 0x001C : "Atheros AR5BXB63 WWAN Chip", 0x001c : "AR5006EX AR5423a", 0x001D : "PCIVEN_168C&DEV_002B&SUBSYS_E034105B&REV_014&124A40C8&0&00E1", 0x002 : "PCI/VEN_168C&DEV_002B&SUBSYS_7173144F&REV_014&200004B7&0&00E1", 0x0023 : "802.11a/b/g/n&#1041;&#1077;&#1089;&#1087;&#1088;&#1086;&#1074;&#1086;&#1076;&#1085;&#1086;&#1081; PC", 0x0024 : "Atheros 802.11a/b/g/n", 0x0027 : "Atheros AR5B95 Wireless LAN 802.11 a/b/g/n Controller", 0x002A : "Wireless Network Adapter", 0x002B : "Atheros AR5B95 ", 0x002C : "Wireless 802.11 a/b/g/n WiFi Adapter (PCI-Express)", 0x002D : "802.11b/g/n", 0x002E : "Atheros ar9287 PCI &#1052;&#1086;&#1078;&#1083;&#1080;&#1074;&#1086;&#1089;&#1090;&#1110;: &#1047;&#", 0x0030 : "Killer Wireless - N", 0x0032 : "Atheros AR9485", 0x0034 : "802.11a/b/g/n", 0x0036 : "Qualcomm Atheros AR956x Wireless Network Adapter", 0x0037 : "Atheros AR1111 WB-EG Wireless Network Adapter", 0x003e : "Wireless Network Adapter", 0x007 : "Wireless Network Adapter", 0x0280 : "PCIVEN_168C&DEV_002B&SUBSYS_30AF185F", 0x032 : "Dell Wireless DW1703 802.11b/g/n", 0x1014 : "Atheros AR5212 802.11abg wireless Drivers", 0x14F1 : "PCIVEN_168C&DEV_001A&SUBSYS_04181468&REV_014&FCF0450&0&10A4", 0x168C : "Qualcomm Atheros AR9485WB-EG Wireless Network Adapter", 0x1a3b : "802.11a/b/g/n Wireless PCI Adapte", 0x3002 : "Bluetooth 3.0", 0x6666 : "Atheros AR5B95 Wireless LAN 802.11 a/b/g/n Controller", 0x9876 : "Atheros AR5B95 Wireless LAN 802.11 a/b/g/n Controller", 0xFF1B : "Wireless LAN G", 0xFF96 : "LAN-Express AS IEEE 802.11g miniPCI adapter", }, 0x1690 : { 0x0742 : "BCM2070", }, 0x1693 : { 0x0212 : "EPONINE ESR-PCI Board", 0x0213 : "EPONINE MTM120 PCI Board", }, 0x16AE : { 0x000A : "Crypto Accelerator", 0x1141 : "Crypto Accelerator", }, 0x16CA : { 0x0001 : "Solid State Disk", }, 0x16EC : { 0x0116 : "RealTek 8169S chip", 0x0303 : "U.S. Robotics 56K FAX USB V1.1.0 / V.92 USB modem", 0x1007 : "U.S. Robotics 56K Win INT", 0x2013 : "U.S. Robotics 56K Voice Host Int", 0x2F00 : "http://www.usr.com/support/product-template.asp?prod=5660a", 0x2f12 : "U.S.Robotic (A- Modem/PCI)", 0x3685 : "Wireless Access Adapter Model 022415", 0x5685 : "U.S. Robotics 56K Voice Host Int (A-Modem/ PCI)", }, 0x170B : { 0x0100 : "Crypto Aceletator", }, 0x1710 : { 0x5812 : "itech numeric small keyboard", 0x9835 : "2 serial", }, 0x1712 : { 0x3038 : "usb", 0x7130 : "unknown", }, 0x1725 : { 0x7174 : "VSC7174 PCI/PCI-X SATA Controller", }, 0x1734 : { 0x007a : "ATI Rage XL (rev 27)", 0x1011 : "Adaptec AIC-7902 Dual Channel U320 SCSI", 0x1012 : "Serverworks Southbridge with RAID/IDE (rev a0)", 0x1013 : "Broadcom Corp. 
0x10b9 : "SAS 3000 series", }, 0x1737 : { 0x0071 : "Dual Band Wireless N USB Network Adapter", 0x1032 : "Linksys Instant Gigabit Desktop Network Interface", }, 0x173B : { 0x03E8 : "Gigabit Ethernet Adapter", 0x03EA : "Gigabit Ethernet Adapter", }, 0x1743 : { 0x8139 : "Fast Ethernet Adapter with ROL", }, 0x174B : { 0x0260 : "Sapphire Radeon 9250", 0x0261 : "Sapphire Radeon 9250 - Secondary", 0x7176 : "RADEON 9000 ATLANTIS PRO", 0x7177 : "RADEON 9000 ATLANTIS PRO - Secondary", 0x71C6 : "ATI RADEON X1650 Series", 0x7244 : "Sapphire ATI X1950 XT", 0x7C12 : "RADEON 9200 ATLANTIS - Secondary", 0x7C13 : "RADEON 9200 ATLANTIS", 0x9501 : "ATI Radeon HD 3450", 0xE106 : "Graphics Chipset ATI Radeon HD 4300/4500 Series ", 0xe131 : "ATI 4870", 0xE140 : "Sapphire HD 5870 1GB GDDR5", }, 0x1753 : { 0x1001 : "VP500", 0x1004 : "VP1000", }, 0x1755 : { 0x0000 : "Au1500 Processor", }, 0x17A1 : { 0x0128 : "USB2.0 JPEG WebCam ", }, 0x17AA : { 0x0106 : "Intel Sandy Bridge-MB GT1 - Integrated Graphics Controller [D2/J1/Q0] [Lenovo", 0x7145 : "Mobility ATI Radeon X1400", }, 0x17AF : { 0x4150 : "200", 0x7291 : "RV560", }, 0x17C0 : { 0x12ab : "intel", }, 0x17CC : { 0x1978 : "usb 2.0 device controller", 0x2280 : "USB 2.0 Device Controller", }, 0x17D5 : { 0x5831 : "Xframe 10GbE PCI-X Adapter", 0x5832 : "Xframe II 10GbE PCI-X 2.0 Adapter", 0x5833 : "E3100 PCI-Express 10Gb Ethernet Interface", }, 0x17db : { 0x0101 : "NA", 0x0201 : "NA", 0x0301 : "NA", }, 0x17E9 : { 0x02a7 : "USB VGA/DVI Adapter UV-D4A1-B", 0x4318 : "Dell GigabitEthernet (USB-G1000)", }, 0x17EE : { 0x4153 : "RV350", }, 0x17F3 : { 0x1010 : "D1010", 0x1011 : "D1011", 0x1030 : "M1030", 0x2010 : "M2010", 0x3010 : "M3010", 0x6021 : " ", 0x6036 : " ", 0x6040 : "R6040x", 0x6060 : " modem", 0x6061 : " V90479. 1",
1", }, 0x17FE : { 0x2220 : "Generic IEEE 802.11b/g Wireless LAN Card", }, 0x1813 : { 0x3059 : "AC97 Enhanced Audio Controller - the 8251 controller is different", 0x4000 : "intel V.92 HaM Modem", 0x4100 : "Intel HaM V.92 Modem", }, 0x1814 : { 0x0001 : "...B742000", 0x0101 : "2460 802.11b", 0x0201 : "PCIVEN_1814&DEV_3298&SUBSYS_1451033", 0x0201 : "WiFiSKY", 0x0201 : "001167F044E5", 0x0201 : "RT2560F", 0x0201 : "WMP54G", 0x0301 : "RT2561", 0x0301 : "RT2561", 0x0302 : "RT2525 2.4GHz transceiver + RT2560 MAC/BBP", 0x0401 : "RT 2661", 0x0601 : "RT2860T", 0x0701 : "RT2860T", 0x0781 : "RT2790T/RT2860/RT2890/RT2700E", 0x1418 : "0x14FI", 0x14F1 : "0x1814", 0x201 : "25601814&REV_01", 0x3060 : "RT3060", 0x3090 : "Ralink RT3090", 0x3290 : "1010", 0x3298 : "-3290", 0x3592 : "RT3592", 0x5360 : "RT5360 ", 0x5390 : "ASUS X55VD VEN_1814&DEV_5390&SUBSYS_E054105B&REV00", 0x539B : "RT5390R", 0x9876 : "b8341462", }, 0x186C : { 0x1014 : "Atheros 802.11abg", }, 0x1888 : { 0x0301 : "", 0x0601 : "", 0x0710 : "", 0x0720 : "", 0x2503 : "Video Capture (10 bit High qualtiy cap)", 0x2504 : "Video Capture", 0x3503 : "VGA Geforce4 MX440", 0x3505 : "VGA Geforce4 Ti4200", }, 0x18C9 : { 0x1011 : "Video processor", 0x1012 : "Video processor", 0x1013 : "Video processor", 0x1014 : "Video processor", 0x1015 : "Video processor", 0x1016 : "Video processor", 0x2011 : "Framegrabber", 0x2012 : "Framegrabber", 0x2013 : "Framegrabber", 0x2014 : "Framegrabber", 0x2015 : "Framegrabber", 0x2016 : "Framegrabber", 0x2017 : "Framegrabber", 0x2021 : "Framegrabber", 0x3011 : "Video Output Board", }, 0x18CA : { 0x0020 : "Volari Z series (Select GPU Graphic Drivers", 0x0040 : "Volari Family Z Series", }, 0x18F7 : { 0x0001 : "ESCC-PCI-335", 0x0002 : "422/4-PCI-335", 0x0004 : "422/2-PCI-335", 0x000a : "232/4-PCI-335", }, 0x1904 : { 0x2031 : "silan sc92031 Network adapter", 0x8139 : "Realtek RTL8139 PCI Fast Ethernet Adapter", 0x8139 : "Realtek RTL8139 PCI Fast Ethernet Adapter", }, 0x1910 : { 0x0001 : "Seaway Network Content Accelerator", }, 0x1912 : { 0x0014 : "usb3.0 renesas", 0x0015 : " nec", 0x0015 : "Renesas Electronics USB 3.0 Host Controller", }, 0x1969 : { 0x1026 : "PCI-E ETHERNET CONTROLLER ", 0x1048 : "Gigabit Ethernet 10/100/1000 Base-T Controller", 0x1060 : "PCI-E Fast Ethernet Controller", 0x1062 : "Atheros AR8132 PCI-E&#1050;&#1086;&#1085;&#1090;&#1088;&#1086;&#1083;&#1083;&#1077;&#1088; &#1041;&#", 0x1062 : "Gigabit Ethernet 10/100/1000 Base-T Controller", 0x1063 : "Atheros AR8131 PCI-E Gigabit Ethernet Controller", 0x1073 : "Atheros AR8151", 0x1083 : "Atheros AR8151 PCI-E Gigabit Ethernet Controller (NDIS 6.20)", 0x1090 : "Fast Ethernet", 0x1091 : "PCI-E Gigabit Ethernet Controller", 0x10a0 : "Controller Ethernet Qualcomm Atheros AR8172/8176/8178 PCI-E Fast (NDIS 6.30)", 0x168c : "Gigabit Ethernet 10/100/1000 Base-T Controller ", 0x1969 : "Atheros AR815x/816x Ethernet Controller Driver ", 0x2048 : "Fast Ethernet 10/100 Base-T Controller", 0x2049 : "der", 0x2060 : "AR8152 v1.1 Fast Ethernet", 0x2061 : "Ethernet Controller", 0x2062 : "Qualcomm Atheros AR8152/8158", 0x4747 : "VEN_1969", 0x9876 : "Fast Ethernet 10/100 Base-T Controller ", 0xE091 : "Killer E2200 Network Card", }, 0x1971 : { 0x0001 : "PCIVEN_1971&DEV_0000&SUBSYS_0003105B&REV_00", 0x1011 : "PCIVEN_1971&DEV_1011&CC_FF00", 0x1021 : "", }, 0x197B : { 0x0250 : "JMC250 PCI Express", 0x0256 : "JMC260 PCI Express Fast Ethernet", 0x0260 : "JMC260 PCI Express Fast Ethernet", 0x0261 : "JMB38X MS Host Controller", 0x1234 : "1234567", 0x197b : "JMB38X SD/MMC ", 0x2360 : "JMB36X", 
0x2361 : "PCI Express to SATA II and PATA Host Controller", 0x2363 : "JMicron JMB362/JMB363 AHCI Controller", 0x2366 : "JMicron JMB366 AHCI/IDE Controller", 0x2368 : "IDE Comtroller", 0x2380 : "IEEE 1394 Host Controller", 0x2381 : "JMB38X SD Host Controller", 0x2382 : "JMB38X SD/MMC Host Controller", 0x2383 : "JMB38X MS Host Controller", 0x2384 : "JMB38X xD Host Controller", 0x2391 : "JMB38X SD/MMC - JMicron PCIe SD Host Controller", 0x2392 : " JMB38X SD/MMC - JMicron PCIe SD Host Controller", 0x2393 : "Chip Description: JMB38X SD/MMC Host Controller", 0x7002 : "JMB38X SD/MMC Host Controller", }, 0x198a : { 0x0210 : "XMC-210", 0x0220 : "XMC-220", 0x0230 : "XMC-230", 0x0240 : "XMC-240", 0x1180 : "PCIe-180", 0x1280 : "PCIe-280", 0x1395 : "PCIe-395 standard", 0x198a : "PCIe-180", 0x3395 : "PCIe-395 BSP", 0x402F : " BenNUEY PCIX", 0x4030 : "H100-PCIX", 0x4031 : "BenNUEY PCI-104-V4", 0x4032 : "BenONE-PCIe", 0x4033 : "BenONE-Xilinx-Kit-ROHS", 0x4034 : "BenNUEY PCIX RoHS", }, 0x19a2 : { 0x0710 : "Emulex OneConnect 10Gb NIC (be3) (rev01)", 0x0712 : "Emulex OneConnect 10Gb iSCSI Initiator (be3) (rev 01)", 0x0714 : "Emulex OneConnect 10Gb FCoE &#1048;&#1085;&#1080;&#1094;&#1080;&#1072;&#1090;&#1086;&#1088; (be3) (&", }, 0x19AC : { 0x0001 : "Crypto Accelerator", }, 0x19B6 : { 0x110c : "Atheros chipset for 802.11a/b/g", }, 0x19E3 : { 0x5801 : "DDRdrive X1", 0x5808 : "DDRdrive X8", 0xDD52 : "DDRdrive X1-30", }, 0x1B13 : { 0x0001 : "nVidia Corporation NV17", }, 0x1B21 : { 0x0612 : "Link Below to Mainboard Driver inside", 0x1041 : "USB 3.0 Host Controller Driver for Windows 7", 0x1042 : "Asmedia ASM104x USB 3.0 Host Controller", }, 0x1B6F : { 0x7023 : "Etron USB 3.0 Extensible Host Controller", }, 0x1B73 : { 0x1000 : "PCIVEN_1000&DEV_0020&SUBSYS_10301000&REV_01PCIVEN", 0x1009 : "USB3.0 host controller", 0x1100 : "USB 3.0 eXtensibile Host controller", }, 0x1c39 : { 0x0300 : "Pegasus Board PCI-e interface", }, 0x1DE1 : { 0x0045 : "Tekram SAS860 Embedded 8xSAS/SATAII RAID", 0x0058 : "Tekram Elrond 8xSAS/SATAII RAID", 0x0391 : "SCSI ASIC", 0x2020 : "SCSI Controller", 0x690C : "IDE Cache Controller", 0xDC29 : "Bus Master IDE PCI 2 controllers", }, 0x2001 : { 0x3C19 : "USB <=> Wireless N 150 Adapter", 0xF103 : ".driver", }, 0x2646 : { 0x0001 : "22323", 0x2646 : "22323", }, 0x3388 : { 0x0020 : "Universal PCI-PCI Bridge (transparent mode)", 0x0021 : "ZS095A0", 0x0022 : "PCI-PCI Bridge", 0x0026 : "Universal PCI-PCI Bridge (transparent mode)", 0x0028 : "Dual Mode PCI-X-to-PCI-X Bridge (transparent mode)", 0x0029 : "Dual Mode PCI-X-to-PCI-X Bridge (non-transparent mode)", 0x0030 : "Transparent PCI-X-to-PCI-X Bridge", 0x0031 : "Synchronous 32-Bit", 0x8011 : "CPU to PCI Bridge", 0x8012 : "PCI to ISA Bridge", 0x8013 : "EIDE Controller", }, 0x3D3D : { 0x0001 : "GLint 300SX", 0x0002 : "GLint 500TX", 0x0003 : "GLint", 0x0004 : "3C0SX", 0x0005 : "Permedia", 0x0006 : "GLint MX", 0x0007 : "3D Extreme", 0x0008 : "GLint Gamma G1", 0x0009 : "Permedia2v", 0x000A : "8086", 0x000C : "Permedia 3", 0x000D : "GLINT R4", 0x000E : "GLINT Gamma G2", 0x0020 : "0x0024", 0x0030 : "0x030000", 0x0100 : "Permedia II", 0x1004 : "Permedia", 0x3D04 : "Permedia", 0x3D07 : "same as above? 
I have no idea", 0xFFFF : "GLint VGA", }, 0x4005 : { 0x0300 : "PCI Audio Device", 0x0308 : "PCI Audio Device + modem", 0x0309 : "PCI Input Controller", 0x1064 : "GUI Accelerator", 0x2064 : "GUI Accelerator", 0x2128 : "GUI Accelerator", 0x2301 : "GUI Accelerator", 0x2302 : "GUI Accelerator", 0x2303 : "GUI Accelerator", 0x2364 : "GUI Accelerator", 0x2464 : "GUI Accelerator", 0x2501 : "GUI Accelerator", 0x4000 : "Audio Chipset", }, 0x4144 : { 0x0040 : "Virtex-E Bridge", 0x0041 : "Virtex-II Bridge", 0x0042 : "Virtex-II Bridge", 0x0043 : "Virtex-II Pro Bridge", 0x0044 : "Virtex-II Pro PCI/PCI-X Bridge", 0x0045 : "Virtex-II Bridge", 0x0046 : "Virtex-II Bridge", 0x0049 : "Virtex-II Pro PCI", 0x004A : "Virtex-II Pro PCI-X Bridge", 0x004F : "Virtex-II Pro PCI-X Bridge", 0x0050 : "Virtex-4LX Bridge", 0x0051 : "ADM-XRC-5T1", 0x0052 : "Xilinx Virtex 5 PMC", 0x0056 : "Virtex 5 AMC FPGA board", 0x0057 : "Xilinx Virtex 5 FPGA PMC ", 0x0058 : "VXS FPGA and PMC Carrier Board", 0x005B : "ADM-XRC-5T2 with JPEG 2000 devices", 0x005C : "FPGA PMC with Analog I/O Interface", 0x005F : "As per XRC-5T2 but with 6 JPEG2000 devices", 0x0300 : "Xilinx Virtex 6 FPGA XMC", 0x0301 : "Xilinx Virtex 6 FPGA XMC", 0x0303 : "Full lenght PCI Express Xilinx Virtex-6 FPGA platform", 0x0305 : "Full length PC Card Xilinx Virtex-6 FPGA platform", }, 0x416C : { 0x0100 : "Puerto paralelo PCI", 0x0200 : "", }, 0x4348 : { 0x1453 : "WCH353L", 0x3253 : "SIE9835 PCI=>DUAL SERIAL", 0x5053 : "4980", 0x7053 : "PCI Serial Port", 0x7173 : " CH35X", }, 0x4C53 : { 0x0000 : "Diagnostics Device", 0x0001 : "Diagnostics Device", }, 0x4D51 : { 0x0200 : "", }, 0x4E8 : { 0x618c : "usb lan adapter4", 0x618d : "usb lan adapter4", }, 0x5053 : { 0x2010 : "Daytona Audio Adapter", }, 0x5136 : { 0x4678 : "S S Technologies", }, 0x5143 : { 0x9204 : "WAN Card Lenovo Notebook", }, 0x5333 : { 0x0551 : "86C551", 0x5333 : "S3 86c765", 0x5631 : "86C325", 0x8800 : "86C866", 0x8801 : "86C964", 0x8810 : "86C732-P", 0x8811 : "8622mcq04", 0x8812 : "86CM65?", 0x8813 : "86C764", 0x8814 : "86C767", 0x8815 : "86CM66", 0x883D : "86C988", 0x8870 : "Fire GL", 0x8880 : "86C868", 0x8881 : "86C868", 0x8882 : "86C868", 0x8883 : "86C868", 0x88B0 : "86C928", 0x88B1 : "86C928", 0x88B2 : "86C928", 0x88B3 : "86C928", 0x88C0 : "86C864", 0x88C1 : "86C864", 0x88C2 : "86C864", 0x88C3 : "86C864", 0x88D0 : "86C964", 0x88D1 : "86C964", 0x88D2 : "86C964", 0x88D3 : "86C964", 0x88F0 : "86C968", 0x88F1 : "86C968", 0x88F2 : "86C968", 0x88F3 : "86C968", 0x8900 : "86C775", 0x8901 : "pciven_5333dev_8C2E&SUBSYS_00011179&REV_054&74C6", 0x8902 : "86C551", 0x8903 : "", 0x8904 : "86C365/366", 0x8905 : "86c765", 0x8906 : "86c765", 0x8907 : "86c765", 0x8908 : "9711 MCN74", 0x8909 : "7699688", 0x890A : "0x00091011", 0x890B : "9726 c19394.00", 0x890C : "86C765", 0x890D : "86C765 Trio64V+ compatible", 0x890E : "9711 MCN74", 0x890F : "86c765", 0x8A01 : "86C375/86C385", 0x8A10 : "86C357", 0x8A11 : "86C359", 0x8A12 : "86C359", 0x8A13 : "86C368", 0x8A20 : "86C391", 0x8A21 : "86C390", 0x8A22 : "86c398", 0x8A23 : "86C394-397", 0x8A25 : "86C370", 0x8A26 : "86C395B", 0x8C00 : "85C260", 0x8C01 : "86C260", 0x8C02 : "86C240", 0x8C03 : "86C280", 0x8C10 : "86C270/274/290/294", 0x8C12 : "86C270/274/290/294", 0x8C13 : "82C294", 0x8C22 : "86C508", 0x8C2A : "86C544", 0x8C2B : "86C553", 0x8C2C : "86C564", 0x8C2D : "86C573", 0x8C2E : "86C584", 0x8C2F : "86C594", 0x8D01 : "86C380/381", 0x8D02 : "86c387", 0x8D04 : "86C410", 0x8E00 : "86C777/787", 0x8E01 : "86C732", 0x9102 : "86c410", 0x9876 : "86C390", 0xCA00 : "86C617", }, 0x544C : 
{ 0x0350 : "IAM", }, 0x5555 : { 0x0003 : "Digital Video OEM computer module", }, 0x5853 : { 0x0001 : "n/a", 0x0002 : "n/a", 0xC000 : "N/A", }, 0x6666 : { 0x0001 : "PCCOM4", 0x0002 : "PCCOM8", }, 0x7d1 : { 0x3304 : "802.11N usb wifi device", 0x3c03 : "Same chipset of RALINK RT2500", 0x3C07 : "PCIVEN_1799&DEV_700F&SUBSYS_700F1799&REV_203&61AAA01&0&48", }, 0x8080 : { 0x1040 : "VIA 82C259 rev 0", }, 0x8086 : { 0x27B8 : "Intel(R) 82801GB/GR (ICH7 Family) LPC Interface Controller", 0x0004 : "IUSB3ROOT", 0x0008 : "Extended Express System Support Ctrlr", 0x0011 : "Ethernet Controller", 0x0042 : "Intel Q57/H55 Clarkdale (Onboard on D2912-A1x)", 0x0044 : "Intel(R) Processor DRAM Controller", 0x0046 : "Intel(R) HD Graphics", 0x0054 : "Audio", 0x0082 : "Centrino Advanced-N 6205 ", 0x0083 : "Intel Centrino Wireless-N 1000", 0x0084 : "Intel Wireless Link WiFi 1000", 0x0085 : "Intel Centrino(R) Advanced-N 6205", 0x0087 : "00E1", 0x008A : "Intel Centrino Wireless-N1030", 0x008B : "Intel(R) Centrino(R) Wireless-N 1030 ", 0x0091 : "Intel Centrino Advanced-N 6230", 0x0100 : "2nd Generation Intel(R) Core(TM) Processor Family DRAM Controller", 0x0102 : "Intel HD Graphics 3000", 0x0104 : "DRAM Controller (Host Bus Controller)", 0x0106 : "asus x501a", 0x010A : "Lenovo TS130 Intel Video Adapter HD", 0x0111 : "Intel Graphics Conroller", 0x0116 : "Intel HD Graphics 3000", 0x0123 : "hardwareids", 0x0152 : "3rd Generation Intel Core Processors with Intel HD Graphics 2500", 0x0153 : "Use the Link", 0x0154 : " 3rd Gen Core processor DRAM Controller (rev 09)", 0x0162 : " Core I7", 0x0166 : "Intel(R) HD Graphics 4000", 0x0189 : "Intel Centrino Wireless Bluetooth 3.0 + High Speed Adapter", 0x027A : "Mobile Intel(R) 945 Express Chipset Family", 0x027D : "High Definition Audio Controller", 0x0283 : "Intel(R) ICH8 Family SMBus Controller", 0x0308 : "PCI Audio Device + modem", 0x0309 : "I/O Processor PCI-to-PCI Bridge Unit", 0x030D : "I/O Companion Unit PCI-to-PCI Bridge", 0x0318 : "General Purpose PCI Processor Address Translation Unit", 0x0319 : "General Purpose PCI Processor Address Translation Unit", 0x0326 : "I/OxAPIC Interrupt Controller", 0x0327 : "I/OxAPIC Interrupt Controller B", 0x0329 : "PCI Express-to-PCI Express Bridge A", 0x032A : "PCI Express-to-PCI Express Bridge B", 0x032C : "PCI Express-to-PCI Express Bridge", 0x0330 : "A-Segment Bridge", 0x0331 : "A-Segment IOAPIC", 0x0332 : "B-Segment Bridge", 0x0333 : "B-Segment IOAPIC", 0x0334 : "Address Translation Unit", 0x0335 : "PCI-X Bridge", 0x0336 : "Address Translation Unit (ATU)", 0x0340 : "Serial to Parallel PCI Bridge A", 0x0341 : "Serial to Parallel PCI Bridge B", 0x0370 : "Segment-A PCI Express-to-PCI Express Bridge", 0x0371 : "A-Bus IOAPIC", 0x0372 : "network controller", 0x0373 : "B-Bus IOAPIC", 0x0374 : "Address Translation Unit", 0x0401 : "P040100", 0x0402 : "HD Graphics 4200", 0x0412 : "ven_8086&dev_0412", 0x0416 : "Intel(R) HD Graphics 4600", 0x0482 : "PCIVEN_8086&DEV_3B64&SUBSYS_20391B0A&REV_06", 0x0483 : "CPU (i486) Bridge (Saturn)", 0x0484 : "SIO ISA Bridge", 0x0486 : "HP Pro 3400 XPx32/7x32 drivers", 0x0493 : "Intel 82579 Gigabit Network Card", 0x04A3 : "Mercury/Neptune Cache/DRAM Controller", 0x0500 : "Processor Bus Controller", 0x0501 : "Memory Controller", 0x0502 : "Scalability Port 0", 0x0503 : "Scalability Port 1 / Glob. Perf. 
Monitor", 0x0510 : "Hub Interface Port 0 (8-bit compatible)", 0x0511 : "Hub Interface Port 2", 0x0512 : "Hub Interface Port 2", 0x0513 : "Hub Interface Port 3", 0x0514 : "Hub Interface Port 4", 0x0515 : "Server I/O Hub (SIOH)", 0x0516 : "Reliabilty", 0x0530 : "Scalability Port 0", 0x0531 : "Scalability Port 1", 0x0532 : "Scalability Port 2", 0x0533 : "Scalability Port 3", 0x0534 : "Scalability Port 4", 0x0535 : "Scalability Port 5", 0x0536 : "Scalability Port Switch Global Registers", 0x0537 : "Interleave Configuration Registers", 0x0600 : "Storage RAID Controller", 0x0780 : "Intel B75 Express Chipset", 0x0800 : "pci/ven_8086&dev_27da&subsys_30b2103c&rev_023&b1bfb68&0&fb", 0x0885 : "Intel Centrino Wireless-N + WiMAX 6150", 0x0887 : "Intel Centrino Wireless-N 2230", 0x0888 : "Intel Centrino Wireless-N 2230", 0x088E : "Intel Centrino Advanced N 6235", 0x0890 : "Network Controller", 0x0894 : "Centrino Wireless-N 105", 0x0896 : "Intel Centrino Wireless-N 130", 0x08AE : "Intel Centrino Wireless-N 100", 0x08B1 : "Intel(R) Dual Band Wireless-AC 7260", 0x08B3 : "Intel Dual Band Wireless-AC 3160", 0x0960 : "i960 RP Microprocessor/Bridge", 0x0962 : "i960RM/RN Microprocessor/Bridge", 0x0964 : "i960 RP Microprocessor Bridge", 0x0A03 : "Intel(R) Dynamic Platform and Thermal Framework Processor Participant", 0x0A04 : "PCI standard host CPU bridge", 0x0A0C : "High Definition Audio Controller", 0x0A16 : "Intel HD Graphics Controller", 0x0A1E : "Intel(R) HD Graphics", 0x0BE1 : "Intel Graphics Media Accelerator 3600 Series", 0x0C00 : "Intel(R) 4th Gen Core processor DRAM Controller", 0x0C05 : "Intel(R) 6 Series/C200 Series Chipset Family SMBus Controller", 0x0F00 : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series SoC Transaction Router", 0x0F04 : "High Definition Audio Controller", 0x0F12 : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series Platform Control Unit - SMBus Port", 0x0F18 : "Intel(R) Trusted Execution Engine Interface", 0x0F1C : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series Platform Conrol Unit LPC: Bridge to Intel Lega", 0x0F23 : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series AHCI", 0x0f31 : "Iris pro 5200", 0x0F35 : "Intel(R) USB 3.0 eXtensible Host Controller", 0x0F48 : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series PCI Express Root Port", 0x0F4A : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series PCI Express Root Port", 0x100 : "2nd Generation Intel(R) Core(TM) Processor Family DRAM Controller", 0x1000 : "Gigabit Ethernet Controller", 0x1001 : "10/100/1000 Ethernet Controller (Fiber)", 0x1002 : "Pro 100 LAN+Modem 56 CardBus II", 0x1004 : "Gigabit Ethernet Controller (Copper)", 0x1008 : "Gigabit Ethernet Controller (Copper)", 0x1009 : "Intel", 0x100C : "Gigabit Ethernet Controller (Copper)", 0x100D : "Gigabit Ethernet Controller (LOM)", 0x100E : "Intel Pro 1000/MT", 0x100F : "Intel(R) PRO/1000 MT Network Connection", 0x1010 : "Dual Port Gigabit Ethernet Controller (Copper)", 0x1011 : "Gigabit Ethernet Controller (Fiber)", 0x1012 : "Dual Port Gigabit Ethernet Controller (Fiber)", 0x1013 : "Gigabit Ethernet Controller (Copper)", 0x1014 : "Gigabit Ethernet Controller", 0x1015 : "Gigabit Ethernet Controller (LOM)", 0x1016 : "Gigabit Ethernet Controller (LOM)", 0x1017 : "Gigabit Ethernet Controller (LOM)", 0x1018 : "PRO/1000 MT Mobile connection", 0x1019 : "Gigabit Ethernet Controller (LOM)", 0x101A : "Gigabit Ethernet Controller (LOM)", 0x101d : "Dual Port Gigabit Ethernet Controller", 0x101E : "Gigabit Ethernet Controller (Mobile)", 0x1026 : "Gigabit 
Ethernet Controller", 0x1027 : "Gigabit Ethernet Controller (Fiber)", 0x1028 : "Gigabit Ethernet Controller", 0x1029 : "Fast Ethernet PCI/CardBus Controller", 0x1030 : "PCI Networking device", 0x1031 : "PRO/100 VE Network Connection", 0x1032 : "PRO/100 VE Network Connection", 0x1033 : "multimedia video controller", 0x1034 : "PRO/100 VM Network Connection", 0x1035 : "Phoneline Network Connection", 0x1036 : "Phoneline Network Connection", 0x1037 : "LAN Controller", 0x1038 : "PRO/100 VM/KM Network Connection", 0x1039 : " 82562", 0x103A : "LAN Controller with 82562ET/EZ (CNR) PHY", 0x103B : "LAN Controller with 82562EM/EX PHY", 0x103C : "LAN Controller with 82562EM/EX (CNR) PHY", 0x103D : "PRO/100 VE Network Connection", 0x103E : "PRO/100 VM Network Connection", 0x1040 : "V.92 PCI (DSP) Data Fax Modema", 0x1042 : "PRO/Wireless 2011 LAN PCI Card", 0x1043 : "Intel(R) PRO/Wireless 2100 LAN Card Driver", 0x1048 : "10 Gigabit Ethernet Controller", 0x1049 : "Gigabit Network Connection Interface Controller", 0x104A : "gigabit ethernet", 0x104B : "Gigabit Ethernet", 0x104D : "Intel Gigabit 82566MC", 0x1050 : "PRO/100 VE Network Connection", 0x1051 : "PRO/100 VE Network Connection", 0x1052 : "PRO/100 VM Network Connection", 0x1053 : "PRO/100 VM ork NetConnectionw", 0x1054 : "PRO/100 VE Network Connection (mobile)", 0x1055 : "PRO/100 VM Network Connection (mobile)", 0x1059 : "Fast Ethernet PCI/CardBus Controller", 0x105E : "Intel(R) PRO/1000 PT Dual Port Server Adapter", 0x1064 : "82562EZ PLC", 0x1065 : "LAN Controller Intel Corporation 82562ET/EZ/GT/GZ - PRO/100 VE Ethernet Controller", 0x1068 : "1068h 82562ET/EZ/GT/GZ PRO/100 VE Ethernet Controller", 0x1075 : "Gigabit Ethernet Controller", 0x1076 : "Gigabit Ethernet Controller", 0x1077 : "Gigabit Ethernet Controller (Mobile)", 0x1078 : "Gigabit Ethernet Controller", 0x1079 : "Dual Port Gigabit Ethernet Controller", 0x107A : "Dual Port Gigabit Ethernet Controller (Fiber)", 0x107B : "Dual Port Gigabit Ethernet Controller (Copper)", 0x107C : "Gigabit Ethernet Controller (Copper) rev 5", 0x1080 : "FA82537EP - Intel 537EP V.92 (PCI) modem", 0x108B : "Intel network controller (PCIE Gigabit Ethernet)", 0x108c : "Intel Corporation 82573E Gigabit Ethernet Controller (Copper)", 0x108E : "Intel(R) Active Management Technology - KCS", 0x108F : "Intel(R) Active Management Technology - SOL", 0x1092 : "PRO/100 VE Network Controller", 0x1094 : "get PRO2KXP.exe from Intel", 0x1096 : "Intel PRO/1000 EB", 0x109A : "Intel PRO/1000 PL Network Adaptor", 0x109c : "HP E1Q Express", 0x10a7 : "82575EB Gigabit Network Connection", 0x10a9 : "82575EB Gigabit Backplane Connection", 0x10b5 : "Quad Port Gigabit Ethernet Controller", 0x10b9 : "Intel PRO/1000 PT Desktop", 0x10BD : "Intel 82566DM Gigabit Ethernet Adapter", 0x10C0 : "Intel(R) 82562V-2 10/100 Network Connection", 0x10c4 : "Intel 82562GT 10/100 Network Controller", 0x10c4 : "Intel 82562GT 10/100 Network Controller", 0x10c9 : "82576 Gigabit ET Dual Port Server Adapter", 0x10cd : "Intel(R) 82567LF-2 Gigabit Network Connection", 0x10CE : "Intel 82567V-2 Gigabit Network Connection", 0x10d3 : "Intel 82574L Gigabit Ethernet Controller", 0x10d6 : "82566 DM-2-gigabyte", 0x10DE : "Intel Gigabit network connection", 0x10e6 : "82576 Gigabit Network Connection", 0x10e7 : "82576 Gigabit Network Connection", 0x10E8 : "E64750-xxx Intel Gigabit ET Quad Port Server Adapter", 0x10EA : "Intel 82577LM Gigabit LAN Controller", 0x10EC : "Realtek 171 High Definition Audio", 0x10EF : "Intel 82578DM Gigabit Ethernet Controller", 0x10F0 : 
"Intel(R) 82578DC Gigabit NIC", 0x10F5 : "Intel 82567LM-2 Gigabit Network Connection", 0x10fb : "10 Gb Ethernet controller", 0x1100 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x1101 : "AGP Bridge", 0x1102 : "Internal Graphics Device", 0x1110 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x1112 : "Internal Graphics Device", 0x1120 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x1121 : "AGP Bridge", 0x112D : "pci simple controller ", 0x1130 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x1131 : "AGP Bridge", 0x1132 : "Internal Graphics Device [810/815 chipset AGP]", 0x1161 : "I/O APIC Device", 0x1162 : "XScale 80200 Companion Chip (FPGA)", 0x1179 : "Dual Port Gigabit Ethernet Controller", 0x12 : "00", 0x1200 : "Network Processor", 0x1209 : "Fast Ethernet Controller for xp pc", 0x1221 : "PCMCIA Bridge", 0x1222 : "IDE Ctrlr", 0x1223 : "Audio Controller", 0x1225 : "Orion Extended Express CPU to PCI Bridge", 0x1226 : "EtherExpress PRO/10", 0x1227 : "LAN Controller with 82562EM", 0x1228 : "Intelligent 10/100 Fast Ethernet Adapter", 0x1229 : "Intel(R) PRO/100 http://ftp.dell.com/network/R56484.EXE", 0x122D : "System Controller (TSC)", 0x122E : "PCI to ISA Bridge (Triton)", 0x1230 : "IDE Interface (Triton)", 0x1231 : "DSVD Modem", 0x1234 : "PCI to ISA Bridge", 0x1235 : "Mobile System Controller (MTSC)", 0x1237 : "PCI & Memory", 0x1239 : "IDE Interface (Triton)", 0x123B : "PCI to PCI Docking Bridge", 0x123C : "Mobile PCI-to-ISA Bridge (MISA)", 0x123D : "Programmable Interrupt Device", 0x123E : "Integrated Hot-Plug Controller (IHPC)", 0x123F : "Integrated Hot-Plug Controller (IHPC)", 0x124 : "24c6", 0x1240 : "AGP Graphics Accelerator", 0x124B : "Mobile PCI-to-PCIsdsdsdI2)", 0x124B : "24c6", 0x124C : "Mobile PCI-to-PCI Bridge (MPCI2)", 0x1250 : "System Controller (TXC)", 0x12D8 : "SIGMATEL STAC 92XX C-Major HD Audio", 0x1360 : "Hub Interface to PCI Bridge", 0x1361 : "Advanced Interrupt Controller", 0x13ca : "VVVVVV", 0x1460 : "Hub Interface-to-PCI Bridge", 0x1461 : "I/OxAPIC Interrupt Controller", 0x1462 : "Hot Plug Controller", 0x1502 : "Intel 82579LM Gigabit Network Card", 0x1503 : "Gigabit Network Connection", 0x150a : "82576NS Gigabit Ethernet Controller", 0x150C : "Intel 82583V Gigabit Ethernet Controller", 0x150d : "82576 Gigabit Backplane Connection", 0x150e : "82580 Gigabit Network Connection", 0x150f : "82580 Gigabit Fiber Network Connection", 0x1510 : "82580 Gigabit Backplane Connection", 0x1511 : "82580 Gigabit SFP Connection", 0x1516 : "82580 Gigabit Network Connection", 0x1518 : "82576NS SerDes Gigabit Network Connection", 0x1521 : "i350 Gigabit Network Connection", 0x1525 : "Intel 82567V-4 Gigabit Network Connection", 0x1526 : "Intel Gigabit ET2 Quad Port Server Adapter", 0x1533 : "Intel I210 Gigabit Network Connection", 0x153A : "Intel I217-LM Ethernet Connection", 0x153B : "Intel Gigabit Ethernet Controller I217-V", 0x1559 : "Intel Ethernet I218-V", 0x155a : "Gigabit Ethernet", 0x15A1 : "Intel Ethernet Connection I218-V", 0x15A2 : "Gigabit Ethernet", 0x167D : "PCI Simple Communications Controller", 0x1960 : "i960RP Microprocessor", 0x1962 : "Promise SuperTrak SX6000 IDE RAID Controller", 0x1A12 : "Eicon DIVA Server Voice PRI 2.0 (PCI)", 0x1A13 : "Eicon DIVA Server Voice PRI 2.0 (PCI)", 0x1A20 : "", 0x1A21 : "Host-Hub Interface A Bridge / DRAM Ctrlr", 0x1A22 : "Host to I/O Hub Bridge (Quad PCI)", 0x1A23 : "AGP Bridge", 0x1A24 : "Hub Interface B Bridge", 0x1A30 : "Host-Hub Interface Bridge", 0x1A31 : "AGP Bridge", 0x1A38 : "5000 Series Chipset DMA Engine", 0x1A3E : "C216 Chipset - Platform 
controller hub", 0x1c02 : "Intel(R) 6/C200 Series Chipset Family 6 Port SATA AHCI Controller", 0x1c02 : "Intel(R) Desktop/Workstation/Server Express Chipset SATA AHCI Controller", 0x1C03 : "Intel(R) CPT Chipset Family 6 Port SATA AHCI Controller ", 0x1C10 : "Intel(R) 6/C200 Series Chipset Family PCI Express Root Port", 0x1C18 : "Intel(R) 6/C200 Series Chipset Family PCI Express Root Port", 0x1C1A : "1C3Asfsfsdf", 0x1C1C : "Intel(R) 6/C200 Series Chipset Family PCI Express Root Port", 0x1C1E : "Intel(R) 6/C200 Series Chipset Family PCI Express Root Port", 0x1C20 : "High Definition Audio Controller", 0x1C22 : "Intel(R) 6 Series/C200 Series Chipset Family SMBus Controller", 0x1C26 : "Intel(R) 6/C200 Series Chipset Family USB Enhanced Host Controller", 0x1C2D : "Intel(R) 6/C200 Series Chipset Family USB Enhanced Host Controller", 0x1c34 : "pci simple communications controller", 0x1c3a : "Intel Management Engine Interface", 0x1c3a : "Intel Management Engine Interface", 0x1C3b : "Series Chipset Family HECI Controller #2", 0x1C3D : "Intel(R) Active Management Technology - SOL", 0x1C49 : "04", 0x1C4C : "Intel(R) Q65 Express Chipset Family LPC Interface Controller", 0x1D3A : "X79/C600 series chipset Management Engine Interface", 0x1D3D : "Intel Active Management Technology AMT", 0x1e00 : "2 ports IDE Controller", 0x1e02 : "Intel 7 Series/C216 Chipset Family SATA AHCI Controller", 0x1E03 : "Intel(R) 7 Series Chipset Family SATA AHCI Controller", 0x1e08 : "2 ports IDE Controller", 0x1E10 : "Intel(R) 7 Series/C216 Chipset Family PCI Express Root Port", 0x1E12 : "Intel(R) 7 Series/C216 Chipset Family PCI Express Root Port", 0x1E16 : "Intel(R) 7 Series/C216 Chipset Family PCI Express Root Port", 0x1E20 : "High Definition Audio Controller", 0x1E22 : "SM-Bus Controller of the Intel Z77 Chipset", 0x1E26 : "Intel(R) 7 Series/C216 Chipset Family USB Enhanced Host Controller", 0x1E2D : "Intel(R) 7 Series/C216 Chipset Family USB Enhanced Host Controller", 0x1e31 : "Intel USB 3.0", 0x1E3A : "Intel Management Engine Interface (MEI)", 0x1E3A : "8555555555555999999999999999999999999999999999999999999999999999999900000000000000000001222222222222", 0x1E3A : "C216 Chipset - Platform controller hub", 0x1E3D : "Intel(R) AMT LMS_SOL for AMT 8.xx", 0x1E59 : "Intel(R) HM76 Express Chipset LPC Controller", 0x1f41 : "Intel Corporation Ethernet Connection I354", 0x2000 : "505943621", 0x2014 : "Framegrabber", 0x2048 : "Fast Ethernet 10/100 Base-T Controller", 0x2124 : "PRO/100 VE Network Connection", 0x2125 : "AC97 Audio Controller. 
website to download - http://www.intel.com/design/chipsets/manuals/29802801.p", 0x2222 : "Intel Management Interface", 0x2250 : "Intel(R) Xeon Phi(TM) Coprocessor", 0x2255 : "023", 0x225C : "Intel(R) Xeon Phi(TM) Coprocessor", 0x225D : "Intel(R) Xeon Phi(TM) Coprocessor", 0x225E : "Intel(R) Xeon Phi(TM) Coprocessor", 0x2406 : "AC97 Modem Controller / PCI Modem", 0x2410 : "LPC Interface", 0x2411 : "IDE Controller (UltraATA/66)", 0x2412 : "USB Controller", 0x2413 : "SMBus Controller", 0x2415 : "Aureal (AD1881 SOUNDMAX) Placa Me Asaki P3-141", 0x2416 : "AC'97 Modem Controller", 0x2418 : "Hub Interface-to-PCI Bridge 82801 PCI-2448 0x2448", 0x2420 : "LPC Interface", 0x2421 : "IDE Controller (UltraATA/33)", 0x2422 : "USB Controller", 0x2423 : "SMBus Controller", 0x2425 : "Audio controler", 0x2426 : "AC97 Modem Controller", 0x2428 : "Hub Interface-to-PCI Bridge", 0x2431 : "pci bus", 0x2440 : "LPC Interface Bridge", 0x2441 : "IDE Controller (UltraATA/66)", 0x2442 : "USB Controller", 0x2443 : "SMBus Controller", 0x2444 : "USB Controller", 0x2445 : "AC97 Audio Controller", 0x2446 : "AC97 Modem Controller", 0x2448 : "Hub Interface to PCI Bridge", 0x2449 : "82559ER Integrated 10Base-T/100Base-TX Ethernet Controller", 0x244A : "IDE Controller", 0x244B : "IDE Controller", 0x244C : "LPC Interface Bridge", 0x244E : "Intel(R) 82801 PCI Bridge", 0x2450 : "LPC Interface Bridge", 0x2452 : "USB Controller", 0x2453 : "SMBus Controller", 0x2459 : "LAN0 Controller", 0x245B : "IDE Controller", 0x245D : "Multimedia Audio Controller", 0x245E : "Hub Interface to PCI Bridge", 0x2480 : "LPC Interface Bridge", 0x2481 : "IDE Controller (UltraATA/66)", 0x2482 : "USB Controller", 0x2483 : "SMBus Controller", 0x2484 : "USB Controller", 0x2485 : "AC97 Audio Controller", 0x2486 : "AC 97 Modem Controller", 0x2487 : "USB Controller", 0x248A : "UltraATA IDE Controller", 0x248B : "UltraATA/100 IDE Controller", 0x248C : "LPC Interface or ISA bridge: see Notes", 0x248D : "USB 2.0 EHCI Contoroller", 0x24C0 : "LPC Interface Bridge", 0x24C2 : "USB UHCI Controller #1", 0x24C3 : "modem", 0x24C4 : "USB UHCI Controller", 0x24C5 : "Realtek AC97", 0x24C5 : "PCI Simple Communications Controller", 0x24C5 : "VIA Vynil v700b", 0x24c5 : "Soundmax Integrated Digital Audio", 0x24C5 : "Intel 82801 DB DBM/DA AC 97 Audio Controller", 0x24c5 : "Audio Controller", 0x24C6 : "AC97 Modem Controller / PCI Modem", 0x24C7 : "USB UHCI Controller #3", 0x24CA : "IDE Controller (UltraATA/100)", 0x24CB : "IDE Controller (UltraATA/100)", 0x24CC : "LPC Interface Bridge", 0x24CD : "USB EHCI Controller", 0x24D0 : "LPC Interface Bridge", 0x24D1 : "SATA Controller", 0x24D2 : "USB UHCI Controller 1", 0x24D3 : "SMBus Controller", 0x24D4 : "USB UHCI Controller #2", 0x24D5 : "Analog Devices AD1888/AD1980 @ Intel 82801EB ICH5 - AC'97 Audio Controller [A-2/A-3]", 0x24D6 : "Motorola SM56 Data Fax Modem", 0x24D7 : "USB UHCI Controller #3", 0x24DB : "EIDE Controller", 0x24DC : "LPC Interface Controller", 0x24DD : "USB EHCI Controller", 0x24DE : "USB UHCI Controller #4", 0x24DF : "SATA Controller (RAID)", 0x24E4 : "intel ", 0x2500 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x2501 : "Host Bridge (MCH)", 0x2502 : "", 0x2503 : "", 0x2504 : "", 0x250B : "Host Bridge (MCH)", 0x250F : "AGP Bridge", 0x2520 : "Memory Translator Hub (MTH)", 0x2521 : "Audio Device on High Definition Audio Bus", 0x2530 : "Host-Hub Interface Bridge(A2 step)", 0x2531 : "Host-Hub Interface_A Bridge (DP mode)", 0x2532 : "AGP Bridge", 0x2533 : "Hub Interface_B Bridge", 0x2534 : "Hub Interface_C Bridge", 0x2535 
: "PCI Bridge", 0x2536 : "PCI Bridge", 0x2539 : "(Quad Processor mode)", 0x2540 : "Host-HI Bridge & DRAM Controller", 0x2541 : "DRAM Controller Error Reporting", 0x2543 : "HI_B Virtual PCI-to-PCI Bridge", 0x2544 : "HI_B PCI-to-PCI Bridge Error Reporting", 0x2545 : "HI_C Virtual PCI-to-PCI Bridge", 0x2546 : "HI_C PCI-to-PCI Bridge Error Reporting", 0x2547 : "HI_D Virtual PCI-to-PCI Bridge", 0x2548 : "HI_D PCI-to-PCI Bridge Error Reporting", 0x254C : "Host Controller", 0x2550 : "Host Controller", 0x2551 : "Host RAS Controller", 0x2552 : "PCI-to-AGP Bridge", 0x2553 : "Hub Interface_B PCI-to-PCI Bridge", 0x2554 : "Hub I/F_B PCI-to-PCI Bridge Error Report", 0x255d : "Host Controller", 0x2560 : "DRAM Controller / Host-Hub I/F Bridge", 0x2561 : "Host-to-AGP Bridge", 0x2562 : "Integrated Graphics Device", 0x2562 : "SATA RAID CONTROLLER", 0x2570 : " 82848P", 0x2571 : " 82848P", 0x2572 : "Integrated Graphics Device", 0x2573 : " 82848P", 0x2576 : " 82848P", 0x2578 : "DRAM Controller / Host-Hub Interface", 0x2579 : "PCI-to-AGP Bridge", 0x257A : "", 0x257B : "PCI to CSA Bridge", 0x257E : "Overflow Configuration", 0x2580 : "Host Bridge / DRAM Controller", 0x2581 : " 925X/XE?", 0x2582 : "82915g/gv/910gl Express Chipset Family", 0x2582 : "82915g/gv/910gl Express Chipset Family", 0x2584 : "Host Bridge / DRAM Controller", 0x2585 : "", 0x2588 : "Host Bridge/DRAM Controller", 0x2589 : "PCI Express Bridge", 0x258A : "Internal Graphics", 0x2590 : "Mobile Intel(R) 915GM/PM/GMS/910GML Express Processor to DRAM Controller", 0x2592 : "Graphic controller family", 0x25A1 : "LPC Interface Bridge", 0x25A2 : "PATA100 IDE Controller", 0x25A3 : "SATA Controller(IDE Mode)", 0x25A4 : "SMBus Controller", 0x25A6 : "AC'97 Audio Controller", 0x25A7 : "AC'97 Modem Controller", 0x25A9 : "USB 1.1 UHCI Controller #1", 0x25AA : "USB 1.1 UHCI Controller #2", 0x25AB : "Watchdog Timer", 0x25AC : "APIC1", 0x25AD : "USB 2.0 EHCI Controller", 0x25AE : "Hub Interface to PCI-X Bridge", 0x25B0 : "Serial ATA Controller (RAID mode)", 0x2600 : "Hub Interface 1.5", 0x2601 : "PCI Express Port D", 0x2602 : "PCI Express Port C0", 0x2603 : "PCI Express Port C1", 0x2604 : "PCI Express Port B0", 0x2605 : "PCI Express Port B1", 0x2606 : "PCI Express Port A0", 0x2607 : "PCI Express Port A1", 0x2640 : "LPC Interface Bridge", 0x2641 : "LPC Interface Bridge (ICH6-M)", 0x2651 : "SATA Controller", 0x2652 : "SATA RAID Controller", 0x2652 : "SATA Controller", 0x2652 : "SATA Raid Controller", 0x2652 : "AHCI Controller", 0x2653 : "SATA AHCI Controller", 0x2653 : "SATA IDE Controller", 0x2653 : "AHCI Controller", 0x2658 : "USB UHCI Controller #1", 0x2659 : "USB UHCI Controller #2", 0x265A : "USB UHCI Controller #3", 0x265B : "USB UHCI Controller #4", 0x265C : "USB 2.0 EHCI Controller", 0x266 : "VIA AC97 codec incorporated into VT82C686VT8251 SouthbridA/B", 0x2660 : "PCI Express Port 1", 0x2662 : "PCI Express Port 2", 0x2664 : "PCI Express Port 3", 0x2666 : "PCI Express Port 4", 0x2668 : "82801FB (ICH6) High Definition Audio Controller", 0x2669 : "jkn ", 0x266A : "SMBus Controller", 0x266C : "LAN Controller", 0x266D : "http://www.dell.com/support/drivers/us/en/19/DriverDetails/DriverFileFormats?DriverId=R104087&FileId", 0x266E : "Driver audio digitale integrato ADI SoundMAX - HP DV4000", 0x266F : "PATA100 Controller - 266F", 0x2670 : "LPC Interface Controller", 0x2678 : "8280 (ICH6) High Defininition Audio Controller", 0x2680 : "SATA Controller(IDE Mode)", 0x2681 : "631xESB/632xESB SATA AHCI Controller", 0x2682 : "Intel(R) ESB2 SATA RAID Controller", 0x269B : 
"SMBus Controller", 0x269E : "PATA100 IDE Controller", 0x27 : "ICH7 Family", 0x2770 : "Intel(R) 945G/GZ/GC/P/PL Processor to I/O Controller", 0x2771 : "Host to PCI Express Bridge", 0x2772 : "PCIVEN_8086&DEV_2772&SUBSYS_2A57103C&REV_023&11583659&0&10", 0x2776 : "INTEL(R) 82945G EXPRESS FAMILY", 0x277C : "Intel 975X Express Chipset", 0x2780 : "Graphics device", 0x2782 : "Graphics device: 82915G/GV/910GL Express Chipset Family", 0x2792 : "Mobile Intel(R) 915GM/GMS/", 0x2794 : "Mobile chipset", 0x27A0 : "Mobile Intel(R) 945GM/GU/PM/GMS/940GML/943GML and Intel(R) 945GT Express Processor to DRAM Controlle", 0x27A1 : "Intel Corporation Mobile 945PM Express PCI Express Root Port", 0x27A2 : "Mobile Intel(R) 945 Express Chipset Family", 0x27A6 : "Intel 945GM/950", 0x27B8 : "Intel(R) ICH7 Family LPC Interface Controller", 0x27B9 : "Intel(R) ICH7M/U LPC Interface Controller", 0x27BC : "NM10 Family LPC Interface Controller", 0x27c0 : "Intel(R) N10/ICH7 Family Serial ATA Storage Controller", 0x27C1 : "AHCI Controller", 0x27c3 : "Raid Controller", 0x27c4 : "SATA IDE Controller", 0x27C5 : "Intel(R) ICH7/M/MDH SATA AHCI Controller", 0x27C6 : "Raid Controller", 0x27c8 : "Intel(R) N10/ICH7 Family USB Universal Host Controller", 0x27c9 : "Intel(R) N10/ICH7 Family USB Universal Host Controller", 0x27CA : "Intel(R) N10/ICH7 Family USB Universal Host Controller", 0x27CB : "Intel(R) N10/ICH7 Family USB Universal Host Controller", 0x27CC : "Intel(R) N10/ICH7 Family USB2 Enhanced Host Controller", 0x27D0 : "Intel(R) 82801G (ICH7 Family) PCI Express Root Port", 0x27D2 : "Intel(R) 82801G (ICH7 Family) PCI Express Root Port", 0x27D4 : "Intel(R) N10/ICH7 Family PCI Express Root Port", 0x27d8 : "UAA Bus Driver for HD Audio", 0x27d8 : "Microsoft UAA Bus HD Audio", 0x27D9 : "IDT High Definition Audio Driver ", 0x27DA : "Intel(R) N10/ICH7 Family SMBus Controller", 0x27DC : "Intel PRO/100 VE Desktop Adapter", 0x27DC : "Intel PRO/100 VE Desktop Adapter", 0x27DE : "AUDIO (ALC850) << Realtek ", 0x27df : "Intel(R) ICH7 Family Ultra ATA Storage Controller", 0x2802 : "INTEL(R) HIGH DEFINITION AUDIO HDMI", 0x2803 : "Intel(R) High Definition Audio HDMI Service", 0x2804 : "IntcDAudModel", 0x2807 : "Intel HDMI Audio Chip", 0x2812 : "Intel(R) ICH8DH LPC Interface Controller", 0x2815 : "Intel(R) ICH8M LPC Interface Controller - 2815 Driver", 0x2820 : "SATA IDE Controller:4 port", 0x2821 : "AHCI Controller", 0x2822 : "Raid Controller", 0x2824 : "ICH8 AHCI Controller", 0x2825 : "Intel Q35", 0x2828 : "SATA IDE Controller", 0x2829 : "Intel(R) ICH8M SATA AHCI Controller", 0x282A : "Raid Controller", 0x2830 : "Intel(R) ICH8 Family USB Universal Host Controller", 0x2831 : "Intel(R) ICH8 Family USB Universal Host Controller", 0x2832 : "Intel(R) ICH8 Family USB Universal Host Controller", 0x2834 : "Intel(R) ICH8 Family USB Universal Host Controller", 0x2835 : "Intel(R) ICH8 Family USB Universal Host Controller", 0x2836 : "Intel(R) ICH8 Family USB2 Enhanced Host Controller", 0x283A : "ICH8 Family USB2 Enhanced Host Controller", 0x283E : "Intel(R) ICH8 Family SMBus Controller", 0x283F : "Intel(R) ICH8 Family PCI Express Root Port", 0x284 : "Microsoft UAA bus for HD audio", 0x2841 : "Intel(R) ICH8 Family PCI Express Root Port", 0x2843 : "Intel(R) ICH8 Family PCI Express Root Port", 0x2845 : "Intel(R) ICH8 Family PCI Express Root Port", 0x2847 : "Intel(R) ICH8 Family PCI Express Root Port", 0x284B : "Microsoft UAA bus for HD audio", 0x2850 : "Intel(R) ICH8M Ultra ATA Storage Controller", 0x2880 : "Intel Display Audio", 0x2888 : "Q945", 0x2914 : 
"LPC bridge of ICH9", 0x2916 : "PCI Simple Communications-Controller ", 0x2918 : "http://dlcdnet.asus.com/pub/ASUS/misc/utils/MB_WIN7_ATK.ZIP", 0x2919 : "Intel(R) ICH9M/M-E Family 4 Port SATA AHCI Controller", 0x2920 : "SATA IDE Controller:4 port", 0x2921 : "SATA IDE Controller:2 port1", 0x2922 : "AHCI Controller", 0x2923 : "ICH9 AHCI Controller", 0x2925 : "Raid Controller", 0x2926 : "SATA IDE Controller:2 port2", 0x2928 : "SATA IDE Controller:2port1", 0x2929 : "ICH9M/ME AHCI Controller", 0x292D : "SATA IDE Controller:2port2", 0x292E : "SATA IDE Controller:1port2", 0x2930 : "2930", 0x2930 : "Intel ICH9 Family SMBus Controller", 0x2932 : "Intel(R) ICH9 Family Thermal Subsystem", 0x2934 : "Intel(R) ICH9 Family USB Universal Host Controller", 0x2935 : "Intel(R) ICH9 Family USB Universal Host Controller", 0x2936 : "Intel(R) ICH9 Family USB Univeral Host Controller", 0x2937 : "Intel(R) ICH9 Family USB Universal Host Controller", 0x2938 : "Intel(R) ICH9 Family USB Universal Host Controller", 0x2939 : "Intel(R) ICH9 Family USB Universal Host Controller", 0x293A : "Intel(R) ICH9 Family USB2 Enhanced Host Controller", 0x293C : "Intel(R) ICH9 Family USB2 Enhanced Host Controller", 0x293E : "82801IB/IR/IH (ICH9 Family) HD Audio Controller", 0x293E : "82801IB/IR/IH (ICH9 Family) HD Audio Controller", 0x2940 : "Intel(R) ICH9 Family PCI Express Root Port", 0x2942 : "Intel(R) ICH9 Family PCI Express Root Port", 0x2944 : "Intel(R) ICH9 Family PCI Express Root Port", 0x2948 : "Intel(R) ICH9 Family PCI Express Root Port", 0x294C : "Intel(R) 82566DC-2 Gigabit Network Connection", 0x2972 : "Onboard Video Device for 82946GZ chips", 0x2986 : "Intel", 0x2987 : "Intel PCI Serial Port", 0x2992 : "Intel(R) Express Chipset video", 0x2993 : "Intel(R) Express Chipset Dell Version", 0x2994 : "Intel Management Engine Interface (HECI)", 0x2996 : "IDE Controller", 0x2997 : "PCI Serial Port", 0x29a0 : "Intel P965/G965 Processor to I/O Controller", 0x29a1 : " 82G965", 0x29A2 : "Intel 82G965 Graphics and Memory Controller Hub (GMCH)", 0x29A4 : "", 0x29A6 : "IDE Controller", 0x29B2 : "Intel(R) Q35 Express Chipset Family", 0x29B3 : "Intel Graphics", 0x29B4 : "Intel ME: Management Engine", 0x29B4 : "Management Engine Driver", 0x29B6 : "IDE Controller", 0x29B7 : "Serial Over LAN", 0x29C2 : "Intel(R) G33 chipset GMA3100 video Driver", 0x29C2 : "Intel(R) G33 chipset GMA3100 video Driver", 0x29C4 : "Intel ME: Management Engine Interface", 0x29C6 : "IDE Controller", 0x29D4 : "Intel Management Interface", 0x29D6 : "IDE Controller", 0x29E6 : "IDE Controller", 0x29F6 : "IDE Controller", 0x2A00 : "Mobile Intel(R) PM965/GM965/GL960/GS965 Express Processor to DRAM Controller", 0x2A02 : "Intel GM965", 0x2A03 : "Mobile Intel(R) 965 Express Chipset Family", 0x2A04 : "Intel PCI communication controller-Intel Management Engine Interface update", 0x2A06 : "IDE Controller", 0x2A07 : "Intel PCI Serial Port", 0x2A08 : "Intel(R) Extended Thermal Model MCH", 0x2A12 : "Mobile Intel(R) 965 Express Chipset Family", 0x2A16 : "IDE Controller", 0x2A40 : "Mobile Intel(R) 4 Series Chipset Processor to DRAM Controller", 0x2A42 : "Intel Mobile Graphic", 0x2A43 : "Intel Mobile Graphic", 0x2A44 : "IC658", 0x2A46 : "IDE Controller", 0x2a47 : "Active Management Technology - SOL", 0x2A52 : "IDE Controller", 0x2C62 : "QuickPath Architecture Generic Non-core Registers", 0x2D01 : "QuickPath Architecture System Address Decoder", 0x2D10 : "QPI Link", 0x2D11 : "QPI Physical", 0x2E06 : "IDE Controller", 0x2E12 : "Intel Q45/Q43 Express Chipset", 0x2e13 : "Intel(R) 4 
Series Internal Chipset", 0x2E14 : "Intel Management Engine Interface (HECI)", 0x2E15 : "Intel AMT LMS_SOL for AMT 5.xx", 0x2E16 : "IDE Controller", 0x2E17 : "Intel AMT LMS_SOL for AMT 5.xx", 0x2E24 : "pci simple communications controller", 0x2E24 : "Intel Management Engine Interface", 0x2E26 : "IDE Controller", 0x2e29 : "Intel(R) 4 Series Chipset PCI Express Root Port - 2E29", 0x2E30 : "Intel(R) 4 Series Chipset Processor to I/O Controller", 0x2E32 : "Intel G41 express graphics", 0x2E33 : "ghaphics chipset g41TY", 0x2E33 : "ghaphics chipset g41 ghaphics chipset g41 ", 0x2E46 : "IDE Controller", 0x2E96 : "IDE Controller", 0x2f00 : "multimedia audio device (codec AC97) SoundMAX or VIA", 0x3092 : "I2O 1.5 RAID Controller", 0x3200 : "PCI-X to Serial ATA Controller", 0x3252 : "SUBSYS", 0x3340 : "Host-Hub Interface Bridge", 0x3341 : "AGP Bridge", 0x3342 : "Power Management", 0x3400 : "Intel(R) 5520/5500/X58 I/O Hub to ESI Port", 0x3408 : "Intel(R) 7500/5520/5500/X58 I/O Hub PCI Express Root Port", 0x3409 : "Intel 7500 Chipset PCIe Root Port", 0x340A : "Intel(R) 7500/5520/5500/X58 I/O Hub PCI Express Root Port", 0x340B : "Intel 7500 Chipset PCIe Root Port", 0x340C : "Intel 7500 Chipset PCIe Root Port", 0x340E : "Intel(R) 7500/5520/5500/X58 I/O Hub PCI Express Root Port", 0x3410 : "Intel 7500 Chipset PCIe Root Port", 0x3422 : "Intel(R) 7500/5520/5500/X58 I/O Hub GPIO and Scratch Pad Registers", 0x3423 : "SRCU21/SRCU31 Microsoft Windows* 2000 Memory Management Files", 0x342E : "Intel(R) 7500/5520/5500/X58 I/O Hub System Management Registers", 0x3438 : "Intel(R) 7500/5520/5500/X58 I/O Hub Throttle Registers", 0x3463 : "NTPNP_PCI0002", 0x3464 : "NTPNP_PCI0002", 0x3465 : "NTPNP_PCI0002", 0x348D : "Gigabit Ethernet Controller", 0x34c5 : "Realtek AC97 (NOT an intel)", 0x3575 : "Host-Hub I/F Bridge / SDRAM Controller", 0x3576 : "Host-AGP Bridge", 0x3577 : "Integrated Graphics Device", 0x3578 : "CPU to I/O Bridge", 0x3579 : "SDRAM Controller / Host-hub Interface", 0x357B : "Integrated Graphics Device", 0x3580 : "Host-Hub Interface Bridge", 0x3581 : "Virtual PCI to AGP Bridge", 0x3582 : "Integrated Graphics Device", 0x3584 : "System Memory Controller", 0x3585 : "Configuration Process", 0x3590 : "Memory Controller Hub", 0x3591 : "Memory Controller Hub", 0x3592 : "Memory Controller Hub", 0x3593 : "MCH Error Reporting Registers", 0x3594 : "DMA Controller Registers", 0x3595 : "PCI Express Port A", 0x3596 : "PCI Express Port B", 0x3597 : "PCI Express Port B", 0x3598 : "PCI Express Port B1", 0x3599 : "PCI Express Port C", 0x359A : "PCI Express Port C1", 0x359B : "Extended Configuration Registers", 0x359E : "MCH Control Registers", 0x360B : "intel simple communication controller", 0x3A00 : "ICH10 4 port SATA IDE Controller", 0x3A02 : "ICH10D SATA Controller", 0x3A03 : "ICH10 AHCI", 0x3A05 : "ICH10D SATA Controller", 0x3A06 : "SATA2(2Port1)", 0x3A14 : "82801JDO ICH10DO", 0x3A16 : "Intel(R) ICH10R LPC Interface Controller", 0x3A1A : "82801JD ICH10D", 0x3A20 : "Intel(R) ICH10 Family 4 port Serial ATA Storage Controller", 0x3A22 : "AHCI Controller", 0x3A23 : "ICH10 AHCI", 0x3A26 : "Intel(R) ICH10 Family 2 port Serial ATA Storage Controller", 0x3A30 : "INTEL(R) ICH10 Family SMB controller ", 0x3A34 : "Intel(R) ICH10 Family USB Universal Host Controller", 0x3A35 : "Intel(R) ICH10 Family USB Universal Host Controller", 0x3A36 : "Intel(R) ICH10 Family USB Universal Host Controller", 0x3A37 : "Intel(R) ICH10 Family USB Universal Host Controller", 0x3A38 : "Intel(R) ICH10 Family USB Universal Host Controller", 0x3A39 : 
"Intel(R) ICH10 Family USB Universal Host Controller", 0x3A3A : "Intel(R) ICH10 Family USB Universal Host Controller", 0x3A3C : "Intel(R) ICH10 Family USB Enhanced Host Controller", 0x3A3E : "Microsoft UAA Bus Driver for High Definition Audio", 0x3A40 : "Intel(R) ICH10 Family PCI Express Root Port", 0x3A42 : "Intel(R) ICH10 Family PCI Express Root Port", 0x3a60 : "SM-Bus Controller", 0x3B00 : "LPC Interface Controller", 0x3B01 : "LPC Interface Controller", 0x3B02 : "LPC Interface Controller", 0x3B03 : "LPC Interface Controller", 0x3B06 : "LPC Interface Controller", 0x3B07 : "Intel(R) 5/3400 Series Chipset LPC Interface Controller", 0x3B08 : "LPC Interface Controller", 0x3B09 : "LPC Interface Controller", 0x3B0A : "LPC Interface Controller", 0x3B0B : "LPC Interface Controller", 0x3B0D : "LPC Interface Controller", 0x3B0F : "LPC Interface Controller", 0x3B12 : "LPC Interface Controller", 0x3B14 : "LPC Interface Controller", 0x3B16 : "LPC Interface Controller", 0x3B20 : "SATA IDE 4-Port Desktop", 0x3B21 : "SATA IDE 2-Port Desktop", 0x3B22 : "SATA AHCI 6-Port Desktop", 0x3B23 : "SATA AHCI 4-Port Desktop", 0x3B24 : "SATA Enhanced RAID", 0x3B25 : "SATA Raid Controller", 0x3B26 : "SATA IDE 2-Port Secondary Desktop", 0x3B28 : "SATA IDE 4-Port Mobile", 0x3B29 : "SATA AHCI 4-Port Mobile", 0x3B2B : "SATA Enhanced RAID", 0x3B2C : "SATA Raid Controller", 0x3B2D : "SATA IDE Controller:2 port", 0x3B2E : "SATA IDE 4-Port Mobile", 0x3B2F : "Intel(R) 5/3400 Series Chipset Family 6 Port SATA AHCI Controller", 0x3B30 : "Intel(R) 5/3400 Series Chipset Family SMBus Controller", 0x3B32 : "Intel(R) 5/3400 Series Chipset Family Thermal Subsystem", 0x3B34 : "Standard Enhanced PCI to USB Host Controller", 0x3B3C : "Standard Enhanced PCI to USB Host Controller", 0x3B42 : "Intel(R) 5/3400 Series Chipset Family PCI Express Root Port", 0x3B44 : "Intel(R) 5/3400 Series Chipset Family PCI Express Root Port", 0x3B46 : "Intel(R) 5/3400 Series Series Chipset Family PCI Express Root Port", 0x3B48 : "Intel(R) 5/3400 Series Chipset Family PCI Express Root Port", 0x3B56 : "High Definition Audio Controller", 0x3b63 : "06", 0x3B64 : "Management Engine Driver", 0x3B64 : "Intel Management Engine Interface", 0x3B64 : "Management Engine Driver", 0x3B64 : "Intel Management Engine Interface", 0x3B64 : "Intel Management Engine Interface", 0x3B64 : "intel", 0x3b64 : "ven_8086&dev_3b64&subsys_fd3c1179&rev_06 [Toshiba C660-1CN]", 0x3b65 : "06", 0x3B67 : "Intel(R) Active Management Technology - Serial Over LAN (SOL) ", 0x4000 : "V.90 HaM Modem", 0x402f : "Intel (R) 5400 Chipset QuickData Technology device - 402F", 0x4220 : "Intel 54 MBit/s Notebook WLAN Card", 0x4222 : "Intel 3945ABG Wireless LAN controller", 0x4223 : "Intel (R) PRO/Wireless 2200BG Network Connection", 0x4223 : "Intel (R) PRO/Wireless 2200BG Network Connection", 0x4224 : "802.11a/b/g WLan adapter", 0x4227 : "Intel(R) PRO/Wireless 3945ABG", 0x4229 : "Intel Wireless WiFi Link 4965AGN(supporting 802.11a/b/g/Draft-N)", 0x422B : "Intel(R) Centrino(R) Ultimate-N 6300 AGN", 0x422C : "Intel(R) Centrino(R) Advanced-N 6200 AGN", 0x422D : "Intel Wireless WiFi Link 4965AGN", 0x4230 : "Intel Wireless WiFi Link 4965AGN", 0x4232 : "Carte Intel WiFi Link 5100 AGN", 0x4233 : "Intel Wireless WiFi Link 4965AGN", 0x4235 : "Intel WiFi Link 5300 AGN", 0x4236 : "Intel(R) WiFi Link 5300 AGN", 0x4237 : "Intel (R) WiFi Link 5100 AGN", 0x4238 : "Intel Centrino Ultimate-N 6300 AGN", 0x4239 : "Intel(R) Centrino(R) Advanced-N 6200 AGN", 0x423A : "PRO/Wireless 5350 AGN [Echo Peak]", 0x423C : "WiMAX/WiFi 
Link 5150", 0x4318 : "Dell Wireless 1370 WLAN Mini-PCI Card", 0x444E : "Intel TurboMemory", 0x4813 : "Dell Wireless 1370 WLAN Mini-PCI Card", 0x482 : "PCIVEN8086", 0x4836 : "2425678", 0x4888 : "intel 3945abg wireless lan controller", 0x5001 : "Modem - PPP", 0x5005 : "Modem - PPPoA", 0x5029 : "AHCI Controller", 0x502A : "SATA Controller", 0x502B : "SATA Controller", 0x5200 : "PCI to PCI Bridge", 0x5201 : "Network Controller", 0x5309 : "I/O Processor Address Translation Unit", 0x530D : "I/O Companion Unit Address Translation", 0x6741 : "Intel USB 3.0", 0x6960 : "EHCI 960 emulator", 0x7000 : "PIIX3 PCI-to-ISA Bridge (Triton II)", 0x7010 : "PIIX3 IDE Interface (Triton II)", 0x7020 : "PIIX3 USB Host Controller (Triton II)", 0x7030 : "System Controller", 0x7051 : "Intel Business Video Conferencing Card", 0x7100 : "System Controller (MTXC)", 0x7110 : "Intel 82371AB/EB PCI to ISA bridge (ISA mode)", 0x7111 : "Intel(R) 82371AB/EB PCI Bus Master IDE Controller", 0x7112 : "PIIX4/4E/4M USB Interface", 0x7113 : "PIIX4/4E/4M Power Management Controller", 0x7120 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x7121 : "Graphics Controller", 0x7122 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x7123 : "Intel 82810 Graphics Controller", 0x7123 : "Intel 82810 Graphics Controller", 0x7124 : "Host-Hub Interface Bridge / DRAM Ctrlr", 0x7125 : "Intel Direct AGP 810Chipset ", 0x7126 : "Host Bridge and Memory Controller Hub", 0x7127 : "Graphics Device (FSB 133 MHz)", 0x7128 : "Host Bridge and Memory Controller Hub", 0x712A : "Host Bridge and Memory Controller Hub", 0x7180 : "Host/PCI bridge in 440LX/EX AGP chipset", 0x7181 : "AGP device in 440LX/EX AGP chipset", 0x7182 : "intel", 0x7190 : "Intel 82443BX Pentium(R) II Processor to PCI Bridge", 0x7191 : "Intel 82443BX Pentium(R) II Processor to AGP Controller", 0x7192 : "440BX/ZX chipset Host-to-PCI Bridge", 0x7194 : "AC'97 Audio device", 0x7195 : "AC97 Audio Controller", 0x7196 : "AC97 Modem Controller (Winmodem)", 0x7198 : "PCI to ISA Bridge", 0x7199 : "EIDE Controller", 0x719A : "USB Universal Host Controller", 0x719B : "Power Management Controller", 0x71A0 : "Host-to-PCI Bridge", 0x71A1 : "fabricated by Intel ", 0x71A2 : "Host-to-PCI Bridge", 0x7221 : "graphics device", 0x7600 : "LPC/FWH Interface", 0x7601 : "EIDE Controller", 0x7602 : "USB Host Controller", 0x7603 : "SM Bus Controller", 0x7605 : "IEEE1394 OpenHCI Host Controller", 0x7800 : "AGP Graphics Accelerator", 0x7800 : "PCIVEN_8086&DEV_3B64&CC_0780", 0x803b : "0x81ef", 0x8083 : "Intel Wireless WiFi Link 5100 ABGN 10/100/1000 Base T", 0x8086 : "0x8086&DEV_3B64&CC_0780", 0x8086 : "intel", 0x8086 : "VIA vynil v700b", 0x8086 : "REV_003&61AAA01&0&50 ", 0x8086 : "VIA vynil v700b", 0x8086 : "pci simple communications controller", 0x8086 : "pci simple communications controller", 0x8086 : "Management Engine Driver", 0x8086 : "Intel(R) Management Engine Interface", 0x8108 : "Intel(R) Graphics Media Accelerator 500 https://downloadcenter.intel.com/confirm.aspx?httpDown=http", 0x811A : "Atom SCH PATA", 0x8186 : "i dont know", 0x8280 : "Realtek AC97", 0x84C4 : "450KX/GX PCI Bridge (Orion)", 0x84C5 : "450KX/GX Memory Controller (Orion)", 0x84CA : "450NX PCIset Memory & I/O Controller", 0x84CB : "PCI Expander Bridge", 0x84E0 : "System Address controller", 0x84E1 : "System Data Controller", 0x84E2 : "Graphics Expander Bridge", 0x84E3 : "Memory Address Controller", 0x84E4 : "Memory Data Controller", 0x84E6 : "Wide and fast PCI eXpander Bridge", 0x84EA : "AGP Bridge (GXB function 1)", 0x85A1 : "LPC Bridge", 0x85A2 : "IDE 
Controller", 0x85A3 : "Serial ATA Controller", 0x85A4 : "SMBus Controller", 0x85A6 : "AC'97 Audio Controller", 0x85A7 : "AC'97 Modem Controller", 0x85A9 : "USB 1.1 UHCI Controller #1", 0x85AA : "USB 1.1 UHCI Controller #2", 0x8C02 : "Intel(R) 8 Series/C220 Series SATA AHCI Controller", 0x8C22 : "Intel(R) 8 Series/C220 Series SMBus Controller", 0x8c31 : "Intel USB 3.0 eXtensible", 0x8C3A : "Intel(R) Management Engine Interface", 0x8C5C : "Intel(R) H81 LPC Controller", 0x8CBA : "PCI Simple Communications Controller", 0x8d3a : "Intel Management Engine Interface (MEI)", 0x9620 : "I2O RAID PCI to PCI Bridge", 0x9621 : "I2O 1.5 RAID Controller", 0x9622 : "I2O 1.5 RAID Controller", 0x9641 : "I2O 1.5 RAID Controller", 0x96A1 : "I2O 1.5 RAID Controller", 0x9779 : "0x2992", 0x9874 : "AUDIO CONTROLLER", 0x9876 : "intel brokdale", 0x9876 : "IntcDAudModel", 0x9877 : "1", 0x9888 : "HDAUDIOFUNC_01&VEN_8086&DEV_27d8&REV_1000", 0x9998 : " 02", 0x9999 : "Interface chip", 0x9C03 : "Intel(R) 8 Series SATA Controller", 0x9C14 : "Intel(R) 9 Series PCI Express Root Port", 0x9C20 : "High Definition Audio Controller", 0x9C22 : "Intel(R) 8 Series SMBus Controller", 0x9C24 : "Intel(R) 8 Series Thermal", 0x9C26 : "Intel(R) 8 Series USB Enhanced Host Controller", 0x9C31 : "Intel USB 3.0 eXtensible Host Controller", 0x9C3A : "Intel Management Engine Interface driver", 0x9c3a : "PCI Simple Communication Controller", 0x9c3a : "PCI Simple Communication Controller", 0x9C43 : "Intel(R) 8 Series LPC Controller", 0x9CB1 : "Intel USB 3.0 Driver for Intel 8 and U/Y Series ", 0xA001 : "Intel Media Accelerator 3150", 0xA002 : "Intel Grafik-Media-Accelerator 3150 (Intel GMA 3150)", 0xA011 : "Intel(R) Graphics Media Accelerator 3150", 0xA012 : "Intel Graphics Media Accelerator 3150", 0xA011 : "3&33FD14CA&0&10", 0xA012 : "Intel(R) ICH8 Family SMBus Controller", 0xB152 : "PCI to PCI Bridge", 0xB154 : "PCI to PCI Bridge", 0xB555 : "Non-Transparent PCI-to-PCI Bridge", 0xC50 : "sdf", 0xE13A : "NXMOQSN00430812D49", 0xF4E : "Intel(R) Pentium(R)/Celeron(R) processor N-/J- series PCI Express Root Port", 0x27c8 : "Microsoft UAA Bus HD Audio", 0x27d8 : "INTEL IDT Audio", 0x999 : "PCIVEN_8086&DEV_2930&SUBSYS_037E1014&REV_023&61AAA01&0&FB", 0x1c3a : "REV-04", 0x1E3A : "i5-3210", 0x3B64 : "Chip Description:Management Engine Driver", 0x1c3a : "REV-04 3&11583659", 0x1C3A : "Intel(R) Management Engine Interface", 8671 : "", }, 0x8087 : { 0x0028 : "MCP67 High Definition Audio", 0x07D6 : "Intel Centrino Wireless-N + WiMAX 6150", }, 0x80EE : { 0x7145 : "VirtualBox Graphics Adapter", 0xBEEF : "VirtualBox Graphics Adapter", 0xCAFE : "VirtualBox Device", }, 0x8866 : { 0x1685 : "MP3 player/FM radio/voice recorder 256 Mo flash", 0x1689 : "MP3 player/FM radio/voice recorder 256 Mo flash", }, 0x9004 : { 0x0078 : "AHA-2940UW/CN", 0x1078 : "RAID Coprocessor", 0x1135 : "Texas Instruments", 0x1160 : "Fibre Channel Adapter", 0x2178 : "SCSI Controller", 0x3860 : "AIC-2930U Ultra SCSI Ctrlr", 0x3B78 : "QuadChannel Fast-Wide/Ultra-Wide Diff. 
SCSI Ctrlr", 0x5075 : "SCSI Ctrlr", 0x5078 : "Fast/Wide SCSI Controller", 0x5175 : "SCSI Ctrlr", 0x5178 : "FAST-SCSI Ctrlr", 0x5275 : "SCSI Ctrlr", 0x5278 : "Fast SCSI Ctrlr", 0x5375 : "SCSI Ctrlr", 0x5378 : "Fast SCSI Ctrlr", 0x5475 : "SCSI Ctrlr", 0x5478 : "Fast SCSI Ctrlr", 0x5575 : "SCSI Ctrlr", 0x5578 : "Fast SCSI Ctrlr", 0x5675 : "SCSI Ctrlr", 0x5678 : "Fast SCSI Ctrlr", 0x5775 : "SCSI Ctrlr", 0x5778 : "Fast SCSI Ctrlr", 0x5800 : "PCI-to-1394 Ctrlr", 0x5900 : "ATM155 & 25 LAN Controller", 0x5905 : "ATM Adpater", 0x6038 : "Ultra SCSI Adpater (VAR)", 0x6075 : "CardBus Ultra SCSI Controller", 0x6078 : "PCI SCSI Controller", 0x6178 : "PCI SCSI Controller", 0x6278 : "SCSI Ctrlr", 0x6378 : "SCSI Ctrlr", 0x6478 : "SCSI Ctrlr", 0x6578 : "SCSI Ctrlr", 0x6678 : "SCSI Ctrlr", 0x6778 : "SCSI Ctrlr", 0x6915 : "Fast Ethernet", 0x7078 : "Fast and Wide SCSI Ctrlr", 0x7178 : "Fast/Fast-Wide SCSI Ctrlr", 0x7278 : "Multichannel Fast/Fast-Wide SCSI Ctrlr", 0x7378 : "4-chan RAID SCSI Ctrlr", 0x7478 : "SCSI Ctrlr", 0x7578 : "Multichannel Fast/Fast-Wide Diff. SCSI Ctrlr", 0x7678 : "QuadChannel Fast-Wide/Ultra-Wide Diff. SCSI Ctrlr", 0x7778 : "SCSI Ctrlr", 0x7810 : "Memory control IC", 0x7815 : "RAID + Memory Controller IC", 0x7850 : "Fast/Wide SCSI-2 Controller", 0x7855 : "Single channel SCSI Host Adapter", 0x7860 : "PCI SCSI Controller", 0x7870 : "Fast/Wide SCSI-2 Controller", 0x7871 : "SCSI", 0x7872 : "Multiple SCSI channels", 0x7873 : "Multiple SCSI channels", 0x7874 : "Differential SCSI", 0x7880 : "Fast 20 SCSI", 0x7890 : "SCSI controller", 0x7891 : "SCSI controller", 0x7892 : "SCSI controller", 0x7893 : "SCSI controller", 0x7894 : "SCSI controller", 0x7895 : "Ultra-Wide SCSI Ctrlr on AHA-2940 AHA-394x", 0x7896 : "SCSI controller", 0x7897 : "SCSI controller", 0x8078 : "Ultra Wide SCSI", 0x8178 : "Ultra/Ultra-Wide SCSI Ctrlr", 0x8278 : "AHA-3940U/3940UW/3940UWD SCSI Ctrlr", 0x8378 : "SCSI Controller", 0x8478 : "Ultra-Wide Diff. SCSI Ctrlr", 0x8578 : "Fast-Wide/Ultra-Wide Diff. SCSI Ctrlr", 0x8678 : "QuadChannel Ultra-Wide Diff. SCSI Ctrlr", 0x8778 : "Ultra-Wide SCSI Ctrlr", 0x8878 : "Ultra Wide SCSI Controller", 0x8B78 : "AIC-7880P", 0xEC78 : "QuadChannel Fast-Wide/Ultra-Wide Diff. 
SCSI Ctrlr", }, 0x9005 : { 0x0010 : "AHA-2940U2W/U2B", 0x0011 : "AHA-2930U2 Ultra2 SCSI Host Adapter", 0x0013 : "SCSI Controller", 0x001F : "Ultra2-Wide SCSI controller", 0x0020 : "SCSI Controller", 0x002F : "SCSI Controller", 0x0030 : "SCSI Controller", 0x003F : "SCSI Controller", 0x0050 : "AHA-3940U2x/3950U2x Ultra2 SCSI Adapter", 0x0051 : "AHA-3950U2x Ultra2 SCSI Adapter", 0x0053 : "SCSI Controller", 0x005F : "Ultra2 SCSI Controller", 0x0080 : "Ultra160/m PCI SCSI Controller", 0x0081 : "Ultra160 SCSI Controller", 0x0083 : "Ultra160 SCSI Controller", 0x008F : "Ultra160 SCSI Controller", 0x00C0 : "Ultra160 SCSI Controller", 0x00C1 : "Ultra160 SCSI Controller", 0x00C3 : "Ultra160 SCSI Controller", 0x00C5 : "RAID Subsystem HBA", 0x00CF : "Ultra160 SCSI Controller", 0x0241 : "Adaptec 1420SA Serial AHA HostRAID Controller", 0x0258 : "Adaptec AAR-2610SA SATA 6-Port Raid", 0x0285 : "PCIX133 32/64bit", 0x0286 : "SUBSYS_95801014REV_02", 0x041F : "SAS/SATA Controller", 0x043E : "SAS/SATA Controller", 0x41E : "Razor ASIC", 0x564A : "iSCSI Controller", 0x8000 : "Ultra320 SCSI Controller", 0x800F : "Ultra320 SCSI Controller", 0x8010 : "Ultra320 SCSI Controller", 0x8011 : "Ultra320 SCSI Controller", 0x8012 : "Ultra320 SCSI Controller", 0x8014 : "Ultra320 SCSI Controller", 0x8015 : "Ultra320 SCSI Controller", 0x8016 : "Ultra320 SCSI Controller", 0x8017 : "Ultra320 SCSI Controller", 0x801C : "Ultra320 SCSI Controller", 0x801D : "Ultra320 SCSI Controller", 0x801E : "Ultra320 SCSI Controller", 0x801F : "Ultra320 SCSI Controller", 0x8080 : "Ultra320 HostRAID Controller", 0x808F : "Ultra320 HostRAID Controller", 0x8090 : "HostRAID SCSI Controller", 0x8091 : "HostRAID SCSI Controller", 0x8092 : "HostRAID SCSI Controller", 0x8093 : "HostRAID SCSI Controller", 0x8094 : "HostRAID SCSI Controller", 0x8095 : "HostRAID SCSI Controller", 0x8096 : "HostRAID SCSI Controller", 0x8097 : "HostRAID SCSI Controller", 0x809C : "HostRAID SCSI Controller", 0x809D : "HostRAID SCSI Controller", 0x809E : "HostRAID SCSI Controller", 0x809F : "HostRAID SCSI Controller", }, 0x9412 : { 0x6565 : "IDE Controller?", }, 0x9710 : { 0x7705 : "USB 1.1 to Single Parallel Controller ", 0x7830 : "USB 2.0 to 10/100M Fast Ethernet Controller", 0x8729 : "usb 2.0 10/100M ethernet adaptor", 0x9805 : "MosChip PCI Parallel Port", 0x9815 : "MCS9815 / M-CAB Parallel Adapter", 0x9835 : "2 serial", 0x9845 : "2 serial", 0x9865 : "PCI Porta Paralela", 0x9900 : "NetMOS Single Parallel Port Card", 0x9904 : "PCIe to Multi IO Controller", 0x9912 : "PCIe to Dual Serial and Single Parallel", 0x9922 : "PCIe to Dual Serial Port Controller", }, 0x9902 : { 0x0001 : "SG2010", 0x0002 : "SG2010", 0x0003 : "SG1010", }, 0xA0F1 : { 0x9876 : "0x9876", }, 0xA200 : { 0xa200 : "tv", }, 0xA259 : { 0x3038 : "PCIVEN_103C&DEV_3302&SUBSYS_3305103C&REV_00", }, 0xA304 : { 0x3038 : "USB", }, 0xA727 : { 0x0013 : "3com 11 a/b/g wireless PCI Adapter", }, 0xAA42 : { 0x03A3 : "9400-0931", }, 0xC0DE : { 0x5600 : "", 0xC0DE : "oZ0030", }, 0xD4D4 : { 0x010F : "PMC-211", 0x0601 : "PCI Mezzanine Card", }, 0xDEAF : { 0x9050 : "", 0x9051 : "", 0x9052 : "", }, 0xE159 : { 0x0001 : "Yeastar TDM400", 0x0002 : "Sedlbauer Speed PCI", 0x0600 : "PCI-to-PCI Bridge", }, 0xEACE : { 0x24C5 : "VIA Vynil v700b", 0x3100 : "OC-3/OC-12", 0x3200 : "OC-3/OC-12", 0x320E : "Fast Ethernet", 0x340E : "Fast Ethernet", 0x341E : "Fast Ethernet", 0x3500 : "OC-3/OC-12", 0x351C : "Fast Ethernet", 0x4100 : "OC-48", 0x4110 : "OC-48", 0x4200 : "OC-48", 0x420E : "Dual Gigabit Ethernet", 0x430e : "Dual Gigabit Ethernet", }, 
0xECC0 : { 0x0050 : "", 0x0051 : "", 0x0060 : "", 0x0070 : "", 0x0071 : "", 0x0072 : "", 0x0080 : "4/2 channel (analog/digital) audio card", 0x0100 : "6/8 channel (analog/digital) audio card", 0x3410 : "Motorola", }, 0xEDD8 : { 0xA091 : "ARK1000PV", 0xA099 : "ARK2000PV", 0xA0A1 : "ARK2000MT", 0xA0A9 : "ARK2000MI", 0xA0B1 : "ARK2000MI+", }, 0xFA57 : { 0x0001 : "Pattern Matching Chip", } }
chipsecintel/chipsec
source/tool/chipsec/hal/pcidb.py
Python
gpl-2.0
407,606
[ "BWA", "CRYSTAL", "Octopus", "VisIt" ]
d7b84a313115933c07ded0ee6b07f7b2f43fd98bcb4089b03eee09ef923b6078
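The record above is one flat dictionary literal mapping PCI vendor IDs to {device ID: name} tables. As an illustration of how such a nested table is typically consumed, here is a minimal sketch; it is not code from the chipsec repository, `PCI_IDS` and `device_name` are hypothetical names, and `PCI_IDS` holds only a tiny excerpt of the data shown above.

PCI_IDS = {
    0x9005: {0x0010: "AHA-2940U2W/U2B", 0x8000: "Ultra320 SCSI Controller"},
    0x9710: {0x9835: "2 serial"},
}

def device_name(vendor_id, device_id, table=PCI_IDS):
    # Nested .get() so an unknown vendor or device ID falls through to None.
    return table.get(vendor_id, {}).get(device_id)

print(device_name(0x9005, 0x8000))  # Ultra320 SCSI Controller
print(device_name(0x1234, 0x0001))  # None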
from __future__ import division, print_function import numpy as np import warnings import xactcore as xore import lanczos # $$\ # $$ | # $$ | $$$$$$\ $$$$$$$\ $$$$$$\ # $$ | \____$$\ $$ __$$\ $$ __$$\ # $$ | $$$$$$$ |$$ | $$ |$$ / $$ | # $$ | $$ __$$ |$$ | $$ |$$ | $$ | # $$$$$$$$\\$$$$$$$ |$$ | $$ |\$$$$$$$ | # \________|\_______|\__| \__| \____$$ | # $$\ $$ | # \$$$$$$ | # \______/ # Calculate resolvents by a Lanczos inversion algorithm. def elastic_amplitude(chs, multiplet, im, ip, omegas, eta, iterations, functional=False): """ The total elastic cotunneling ampltiudes over a range of omegas. Parameters ---------- chs : xore.ChargeState Object The relevant ChargeState. multiplet : ndarray The initial/final multiplet as defined by xore.ChargeState.eigenstates. im : int site where one electron is subtracted. ip : int site where one electron is added. omegas : ndarray The sampling energies. eta : float Imaginary part of the resolvent. iterations : int Number of Lanczos iversion iterations. functional : bool Whether Hamiltonian is represented as a functional or a sparse matrix. """ ampls = elastic_amplitudes(chs, multiplet, im, ip, omegas, eta, iterations, functional=False) amp = np.zeros(ampls['W'][0].shape, dtype=np.float) for i, W in enumerate(ampls['W']): pre = 1 if i == 0 else 2 amp += pre*W for i, J in enumerate(ampls['J']): pre = 1 if i == 0 else 2 amp += pre*J return amp def elastic_amplitudes(chs, multiplet, im, ip, omegas, eta, iterations, functional=False): """ The elastic amplitudes Parameters ---------- chs : xore.ChargeState Object The relevant ChargeState. multiplet : ndarray The initial and final multiplet as defined by xore.ChargeState.eigenstates. im : int site where one electron is subtracted. ip : int site where one electron is added. omegas : ndarray The sampling energies. eta : float Imaginary part of the resolvent. iterations : int Number of Lanczos iversion iterations. functional : bool Whether Hamiltonian is represented as a functional or a sparse matrix. """ # coefficients coeffs = elastic_coefficients(chs, multiplet, im, ip, iterations, functional) # number of channels ampls = {} ampls['W'] = [0]*len(coeffs['W']) Es = (multiplet['E'], multiplet['E']) for i, W in enumerate(coeffs['W']): ampls['W'][i] = amplitude(omegas, Es, eta, W[0], W[3]) if W[1] is not None: ampls['W'][i] += amplitude(omegas, Es, eta, W[1], W[2]) ampls['J'] = [0]*len(coeffs['J']) if len(coeffs['J']) > 0: for i, J in enumerate(coeffs['J']): ampls['J'][i] = amplitude(omegas, Es, eta, J[0], J[1]) return ampls def inelastic_amplitude(chs, aplet, bplet, im, ip, omegas, eta, iterations, functional=False): """ The total inelastic cotunneling ampltiudes over a range of omegas. Parameters ---------- chs : xore.ChargeState Object The relevant ChargeState. aplet : ndarray The initial multiplet as defined by xore.ChargeState.eigenstates. bplet : ndarray The final multiplet as defined by xore.ChargeState.eigenstates. im : int site where one electron is subtracted. ip : int site where one electron is added. omegas : ndarray The sampling energies. eta : float Imaginary part of the resolvent. iterations : int Number of Lanczos iversion iterations. functional : bool Whether Hamiltonian is represented as a functional or a sparse matrix. 
""" ampls = inelastic_amplitudes(chs, aplet, bplet, im, ip, omegas, eta, iterations, functional=False) amp = np.zeros((len(ampls['W'][0]),), dtype=np.float) for i, W in enumerate(ampls['W']): pre = 1 if i == 0 else 2 amp += pre*W for i, J in enumerate(ampls['J']): pre = 1 if i == 0 else 2 amp += pre*J return amp def inelastic_amplitudes(chs, aplet, bplet, im, ip, omegas, eta, iterations, functional=False): """ Calculates the different channels inelastic amplitudes over a range of omegas. Parameters ---------- chs : xore.ChargeState Object The relevant ChargeState. aplet : ndarray The initial multiplet as defined by xore.ChargeState.eigenstates. bplet : ndarray The final multiplet as defined by xore.ChargeState.eigenstates. im : int site where one electron is subtracted. ip : int site where one electron is added. omegas : ndarray The sampling energies. eta : float Imaginary part of the resolvent. iterations : int Number of Lanczos iversion iterations. functional : bool Whether Hamiltonian is represented as a functional or a sparse matrix. Returns ------- ampls : Dict ampls['W'] or ampls['J'] wraps a list [0 ..] with entries for each channel. Each entry contains an array with the channel transport amplitudes at the corresponding omegas. """ coeffs = inelastic_coefficients(chs, aplet, bplet, im, ip, iterations, functional) # number of channels ampls = {} ampls['W'] = [0]*len(coeffs['W']) for i, W in enumerate(coeffs['W']): ampls['W'][i] = amplitude(omegas, (aplet['E'], bplet['E']), eta, W[0], W[3]) if W[1] is not None: ampls['W'][i] += amplitude(omegas, (aplet['E'], bplet['E']), eta, W[1], W[2]) ampls['J'] = [0]*len(coeffs['J']) if len(coeffs['J']) > 0: for i, J in enumerate(coeffs['J']): ampls['J'][i] = \ amplitude(omegas, (aplet['E'], bplet['E']), eta, J[0], J[1]) return ampls def amplitude(omegas, energies, eta, pes, ipes): """ Return the amplitude as a function of energy. Parameters ---------- omega : ndarray relevant energies energy : offset energy of given state eta : float constant imaginary contribution to the energies pes : list list of coefficient list for particle transport processes ipes : list list of coefficient list for hole transport processes """ etas = eta if not hasattr(eta, '__iter__'): etas = [eta]*2 if len(energies) < 2: Epes = Eipes = energies else: Epes, Eipes = energies A = B = 0 # for i, _ in xange(len(pes)): if pes is not None: A += amplitude_real(omegas + Epes, etas[0], pes) B += amplitude_imag(omegas + Epes, etas[0], pes) if ipes is not None: A += -amplitude_real(-omegas + Eipes, etas[1], ipes) B += -amplitude_imag(-omegas + Eipes, etas[1], ipes) return np.abs(A)**2 + np.abs(B)**2 def amplitude_real(omegas, eta, gp): """ The real part of the propagator Parameters ---------- omegas : ndarray Range of energies. eta : float Imaginary part of the denominator. gp : Dict Contains the continued fraction coefficients. """ A = lanczos.continued_fraction_vectorized( omegas, 1j*eta - gp['ase'], gp['bse'] ) * gp['wve'] if 'aso' in gp: A += -lanczos.continued_fraction_vectorized( omegas, 1j*eta - gp['aso'], gp['bso'] ) * gp['wvo'] A /= 4 return A def amplitude_imag(omegas, eta, gp): """ Imaginary part of the propagator Parameters ---------- omegas : ndarray Range of energies. eta : float Imaginary part of the denominator. gp : Dict Contains the continued fraction coefficients. 
""" B = 0 if 'asem' in gp: B = lanczos.continued_fraction_vectorized( omegas, 1j*eta - gp['asem'], gp['bsem'] ) * gp['wvem'] B += - lanczos.continued_fraction_vectorized( omegas, 1j*eta - gp['asom'], gp['bsom'] ) * gp['wvom'] B /= 4 return B def elastic_coefficients(chs, multiplet, im, ip, iterations, functional=False): """ Co-tunneling amplitude for transport through this spin-multiplet Parameters ---------- chs : xore.ChargeState Object The current charge state multiplet : list A single member of the list of eigenstates produced by xore.ChargeState.solve im : integer Right lead attachment site ip : int Left lead attachment site iterations: int Number of Lanczos iterations real : bool Is the Hamiltonian real or not? functional : bool Should we calculate the Hamiltonian product with a state using a functional approach (False uses an efficient sparse matrix) """ reps = len(multiplet['V']) # positive size of multiplet (i.e. # m > 0) coeffs = {} # amplitudes coeffs['W'] = [0]*reps # potential scattering amplitudes for i in xrange(reps): ns, state = (multiplet['n'][i], multiplet['V'][i]) ket = xore.State(state, chs.humo.n, nu=ns[0], nd=ns[1]) coeffs['W'][i] = \ potential_coefficients(chs, ns, ket, ket, im, ip, iterations, functional) coeffs['J'] = [0]*(reps - (chs.ne + 1) % 2) # spin change amplitudes dJ = 0 # index change if chs.ne % 2 == 1: # if we have an odd number of electrons nL = multiplet['ns'][0] # nu - nd = +1 ketL = xore.State(multiplet['V'][0], nu=nL[0], nd=nL[1]) # state m =.5 nR = reversed(ns) # nu - nd = -1 ketR = ketL.reversed() # state with m = -.5 coeffs['J'][0] = \ exchange_coefficients(chs, nR, ketL, ketR, im, ip, iterations, functional) # increase index dJ = 1 # spin change amplitudes for i in xrange(reps-1): ns, state = multiplet['n'][i+1], multiplet['V'][i+1] ketL = xore.State(state, chs.humo.n, nu=ns[0], nd=ns[1]) ns, state = multiplet['n'][i], multiplet['V'][i] ketR = xore.State(state, chs.humo.n, nu=ns[0], nd=ns[1]) coeffs['J'][i+dJ] = \ exchange_coefficients(chs, ns, ketL, ketR, im, ip, iterations, functional) return coeffs def inelastic_coefficients(chs, aplet, bplet, im, ip, iterations, functional=False): """ Co-tunneling amplitude for transport through two spin-multiplets Parameters ---------- chs : xore.ChargeState Object The current charge state aplet : list Initial state: A single member of the list of eigenstates produced by xore.ChargeState.solve bplet : list Final state: A single member of the list of eigenstates produced by xore.ChargeState.solve im : int Right lead attachment site ip : int Left lead attachment site iterations: int Number of Lanczos iterations real : bool Is the Hamiltonian real or not? functional : bool Should we calculate the Hamiltonian product with a state using a functional approach (False uses an efficient sparse matrix) """ areps = len(aplet['V']) # positive size of a multiplet (i.e. # m > 0) breps = len(bplet['V']) # positive size of b multiplet (i.e. 
# m > 0) if areps > breps: # we always have S(a) < S(b) areps, breps = breps, areps aplet, bplet = bplet, aplet coeffs = {} # amplitudes coeffs['W'] = [None]*min(areps, breps) # potential scattering amplitudes for i in xrange(len(coeffs['W'])): ns = aplet['n'][i] aket = xore.State(aplet['V'][i], chs.humo.n, nu=ns[0], nd=ns[1]) bket = xore.State(bplet['V'][i], chs.humo.n, nu=ns[0], nd=ns[1]) coeffs['W'][i] = \ potential_coefficients(chs, ns, bket, aket, im, ip, iterations, functional) coeffs['J'] = [None]*(min(areps, breps) - (chs.ne + 1) % 2) # spin change amplitudes dJ = 0 # index change if chs.ne % 2 == 1: # if we have an odd number electrons nL = aplet['n'][0] # nu - nd = +1 # state m =.5 ketL = xore.State(bplet['V'][0], nu=nL[0], nd=nL[1]) nR = reversed(ns) # nu - nd = -1 # state m = -.5 ketR = xore.State(aplet['V'][0], nu=nL[0], nd=nL[1]).reversed() coeffs['J'][0] = \ exchange_coefficients(chs, nR, ketL, ketR, im, ip, iterations, functional) # increase index offset dJ = 1 for i in xrange(len(coeffs['J'])-dJ): # spin change amplitudes ns = bplet['n'][i+1] ketL = xore.State(bplet['V'][i+1], chs.humo.n, nu=ns[0], nd=ns[1]) ns = bplet['n'][i] ketR = xore.State(aplet['V'][i], chs.humo.n, nu=ns[0], nd=ns[1]) coeffs['J'][i+dJ] = \ exchange_coefficients(chs, ns, ketL, ketR, im, ip, iterations, functional) return coeffs def potential_coefficients(chs, ns, ketL, ketR, im, ip, iterations, functional): """ Potential scattering processes Parameters ---------- chs : xore.ChargeState Object The current charge state ns : tuple ns[0] = number of spin-up electrons, ns[1] = number of spin-down electrons ketL : xore.State Object The initial state im : int Right lead attachment site ip : int Left lead attachment site iterations: int Number of Lanczos iterations real : bool Is the Hamiltonian real or not? functional : bool Should we calculate the Hamiltonian product with a state using a functional approach (False: uses an efficient sparse matrix) """ W = [None]*4 # Add spin up and remove spin up if ns[0] + 1 <= chs.humo.n: dR = (1, ip, 1) dL = (-1, im, 1) nu, nd = ns[0] + 1, ns[1] _, Hm = xore.SzState.hamiltonian(chs.humo, nu + nd, nu, functional) W[0] = coefficients(ketL, dL, Hm, dR, ketR, iterations=iterations, real=chs.humo.real, functional=functional, same_kets=True) # Remove spin down and add spin down if ns[1] > 0: dR = (-1, im, -1) dL = (1, ip, -1) nu, nd = ns[0], ns[1] - 1 _, Hm = xore.SzState.hamiltonian(chs.humo, nu + nd, nu, functional) W[3] = coefficients(ketL, dL, Hm, dR, ketR, iterations=iterations, real=chs.humo.real, functional=functional, same_kets=True) if ns[0] != ns[1]: # Add spin down and remove spin down if ns[1] + 1 <= chs.humo.n: dR = (1, ip, -1) dL = (-1, im, -1) nu, nd = ns[0], ns[1] + 1 _, Hm = xore.SzState.hamiltonian(chs.humo, nu + nd, nu, functional) W[1] = coefficients(ketL, dL, Hm, dR, ketR, iterations=iterations, real=chs.humo.real, functional=functional, same_kets=True) # Remove spin up and add spin up if ns[0] > 0: dR = (-1, im, 1) dL = (1, ip, 1) nu, nd = ns[0] - 1, ns[1] _, Hm = xore.SzState.hamiltonian(chs.humo, nu + nd, nu, functional) W[2] = coefficients(ketL, dL, Hm, dR, ketR, iterations=iterations, real=chs.humo.real, functional=functional, same_kets=True) return W def exchange_coefficients(chs, ns, ketL, ketR, im, ip, iterations, functional): """ Processes which change the molecular spin state Parameters ---------- chs : xore.ChargeState Object The current charge state. ns : tuple ns[0] = number of spin-up electrons, ns[1] = number of spin-down electrons. 
All with respect to ketR. ketL : xore.State Object The initial molecular state. ketR : xore.State Object The final state. im : int Right lead attachment site. ip : int Left lead attachment site. iterations: int Number of Lanczos iterations. real : bool Is the Hamiltonian real or not? functional : bool Should we calculate the Hamiltonian product with a state using a functional approach (option False uses an efficient sparse matrix). """ J = [None]*2 # Add a spin up and remove a spin down if ns[1] <= chs.humo.n: dR = (1, ip, 1) # add a spin up on ketR dL = (-1, im, -1) # remove a spin down on ketL nu, nd = ns[0] + 1, ns[1] _, Hm = xore.SzState.hamiltonian(chs.humo, nu + nd, nu, functional) J[0] = coefficients(ketL, dL, Hm, dR, ketR, iterations=iterations, real=chs.humo.real, functional=functional, same_kets=False) # Remove a spin down and add a spin up if ns[1] > 0: dR = (-1, im, -1) # remove a spin down on ketR dL = (1, ip, 1) # add a spin up on ketL nu, nd = ns[0], ns[1] - 1 _, Hm = xore.SzState.hamiltonian(chs.humo, nu + nd, nu, functional) J[1] = coefficients(ketL, dL, Hm, dR, ketR, iterations=iterations, real=chs.humo.real, functional=functional, same_kets=False) return J def coefficients(ketL, dL, Hm, dR, ketR, iterations=10, real=True, functional=False, same_kets=False): """ Returns the coefficients to the continued fraction expansion of the resolvent. ketL : xore.State Object Describes the final state. dL : list The left operator (create_or_destruct, site, spin). Hm : Object Hamiltonian of the intermediate state. dR : list The right operator (create_or_destruct, site, spin). ketR : xore.State Object The right (initial) state. iterations : int number of Lanczos iterations. real : boolean Whether real or complex. functional : boolean Whether a partly functional or entirely matrix computation. same_kets : boolean Whether ketL and ketR are the same states or not. 
""" if iterations < 1: # no iterations return False nonortho = 0 dL = (- dL[0], dL[1], dL[2]) # reverse add or remove if dL[0] == dR[0] and dL[1] == dR[1] and same_kets: v = xore.createannihilate(*dL, state=ketL) wve = np.vdot(v.v, v.v) ase, bse, vend = lanczos.lanczos(v.v, Hm, iterations) if vend > 1e-4: nonortho = 1 G = {'ase': ase, 'bse': bse, 'wve': wve} elif real: va = xore.createannihilate(*dL, state=ketL) vb = xore.createannihilate(*dR, state=ketR) ve, vo = (va.v + vb.v, va.v - vb.v) wve, wvo = (np.vdot(ve, ve), np.vdot(vo, vo)) ase, bse, veend = lanczos.lanczos(ve, Hm, iterations, functional) if veend > 1e-4: nonortho = 1 aso, bso, voend = lanczos.lanczos(vo, Hm, iterations, functional) if voend > 1e-4: nonortho = 1 G = {} G['ase'], G['bse'], G['wve'] = (ase, bse, wve) G['aso'], G['bso'], G['wvo'] = (aso, bso, wvo) else: va = xore.createannihilate(*dL, state=ketL) vb = xore.createannihilate(*dR, state=ketR) ve, vo = (va.v + vb.v, va.v - vb.v) wve, wvo = (np.vdot(ve, ve), np.vdot(vo, vo)) vem, vom = (va.v + 1j*vb.v, va.v - 1j*vb.v) wvem, wvom = (np.vdot(vem, vem), np.dot(vom, vom)) ase, bse, veend = lanczos.lanczos(ve, Hm, iterations, functional) if veend > 1e-4: nonortho = 1 aso, bso, voend = lanczos.lanczos(vo, Hm, iterations, functional) if voend > 1e-4: nonortho = 1 asem, bsem, vemend = lanczos.lanczos(vem, Hm, iterations, functional) if vemend > 1e-4: nonortho = 1 asom, bsom, vomend = lanczos.lanczos(vom, Hm, iterations, functional) if vomend > 1e-4: nonortho = 1 G = {} G['ase'], G['bse'], G['wve'] = ase, bse, wve G['aso'], G['bso'], G['wvo'] = aso, bso, wvo G['asem'], G['bsem'], G['wvem'] = asem, bsem, wvem G['asom'], G['bsom'], G['wvom'] = asom, bsom, wvom if nonortho == 1: warnings.warn('Non-orthogonal Lanczos inversion', UserWarning) return G
georglind/humo
humo/xactlang.py
Python
mit
21,557
[ "ASE" ]
b542503335c5e94e831b85d528847b4e3e508fcf6432b701e66b67a56b5131fa
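xactlang.py evaluates resolvent matrix elements G(z) = <v|(z - H)^{-1}|v> through the continued fraction built from the Lanczos coefficient arrays returned by lanczos.lanczos() above. The sketch below is a self-contained analogue of that evaluation, independent of the humo package; the function name and the a/b indexing convention are assumptions, and the repository's own lanczos.continued_fraction_vectorized() may use different sign conventions.

import numpy as np

def continued_fraction(z, a, b):
    # a: diagonal Lanczos coefficients a_0 .. a_{N-1}
    # b: off-diagonal coefficients b_1 .. b_{N-1}
    # Evaluates G(z) = 1 / (z - a_0 - b_1^2 / (z - a_1 - b_2^2 / ...))
    # bottom-up, starting from the innermost level.
    z = np.asarray(z, dtype=complex)
    g = np.zeros_like(z)
    for a_n, b_n in zip(a[:0:-1], b[::-1]):
        g = b_n**2 / (z - a_n - g)
    return 1.0 / (z - a[0] - g)

omegas = np.linspace(-2.0, 2.0, 5) + 1e-2j   # small imaginary part, like eta above
print(continued_fraction(omegas, a=[0.0, 0.5], b=[0.3]))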
# IPython log file from collections import deque import matplotlib.pyplot as plt import numpy as np import dask.array as da import skan import napari import networkx as nx from scipy import ndimage as ndi from skimage import morphology from scipy import spatial ################################################## # PART ONE: SHOLL ANALYSIS # https://en.wikipedia.org/wiki/Sholl_analysis # Count the number of crossings of paths of spherical shells away from a # central point. # # First, get the skeleton and summary using skan arr = np.asarray(da.from_zarr('bin.zarr')) filled = ndi.binary_fill_holes(arr) bin_skeleton = morphology.skeletonize(filled) > 0 scale = (1.0785801681301463, 0.6918881978764917, 0.6918881978764917) deg_image = skan.csr.make_degree_image(bin_skeleton) viewer = napari.view_image(deg_image, ndisplay=3) skel = skan.csr.Skeleton(bin_skeleton, spacing=scale) summary = skan.summarize(skel) # see summary.columns for info. # Now, define the centre pixel in real coordinates center_pixel = np.asarray([5, 34, 37]) center_pixel_real = center_pixel * scale # Then, define the shell radii (you can do this however you like, this is # just one suggestion.) dataset_radius = np.sqrt(np.sum( (np.asarray(bin_skeleton.shape) * scale)**2)) / 2 shell_radii = np.linspace(0, dataset_radius, 15) # This function takes a path ID, and returns the distance from each pixel in # the path to the central pixel. (The central pixel must be input in pixel, # not real, coordinates.) # Later, we know that if the distances along the path jump across a shell, # then we can increment the number of crossings of that shell. def path_distances(skel, center_point, path_id): path = skel.path_coordinates(path_id) center_point_scaled = skel.spacing * center_point path_scaled = path * skel.spacing distances = np.ravel( spatial.distance_matrix(path_scaled, [center_point_scaled])) return distances # Array to keep track of the crossings all_crossings = np.zeros_like(shell_radii) # For each path: for i in range(skel.n_paths): # Find the distances of the path pixels distances = path_distances(skel, center_pixel, i) # Find which shell bin each pixel sits in shell_location = np.digitize(distances, shell_radii) # Use np.diff to find where bins are crossed. The -1 accounts for # "shell 0" not existing. crossings = shell_location[np.flatnonzero(np.diff(shell_location))] - 1 # increment the corresponding crossings all_crossings[crossings] += 1 # Plot the number of crossings at each radius plt.plot(all_crossings) plt.show() ############################################################### # PART TWO: # finding "depth" of a branch from the soma of a neuron. # This is standard depth-first-search, but unfortunately networkx DFS doesn't # report the depth, so we copy the code over. labels_layer = viewer.add_labels(np.asarray(skel)) # Create a graph from the junctions junction_graph = nx.Graph() junction_graph.add_edges_from( zip(summary['node-id-src'], summary['node-id-dst'])) # Define the "soma" junction. 
You probably have better ways of doing this, # in our case, the center pixel was not actually a junction, so we find the # nearest junction to the center pixel coords_junctions = summary[[f'image-coord-src-{i}' for i in range(3)]].to_numpy() ctr = np.all(coords_junctions == center_pixel, axis=1) np.flatnonzero(ctr) distance_from_center = spatial.distance_matrix(coords_junctions, [center_pixel]) nearest_junction_to_center = np.argmin(distance_from_center) soma_junction = int(summary.iloc[nearest_junction_to_center]['node-id-src']) ###################################################### # The next two functions are copied from # https://networkx.org/documentation/stable/_modules/networkx/algorithms/traversal/breadth_first_search.html, # but have been modified to yield the *depth* as well as the edge ID. def generic_bfs_edges(G, source, neighbors=None, depth_limit=None, sort_neighbors=None): if callable(sort_neighbors): _neighbors = neighbors neighbors = lambda node: iter(sort_neighbors(_neighbors(node))) visited = {source} if depth_limit is None: depth_limit = len(G) queue = deque([(source, depth_limit, neighbors(source))]) while queue: parent, depth_now, children = queue[0] try: child = next(children) if child not in visited: yield (parent, child), depth_limit - depth_now + 1 visited.add(child) if depth_now > 1: queue.append((child, depth_now - 1, neighbors(child))) except StopIteration: queue.popleft() def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None): if reverse and G.is_directed(): successors = G.predecessors else: successors = G.neighbors yield from generic_bfs_edges(G, source, successors, depth_limit, sort_neighbors) # this gives a list of edges as node-id pairs, together with depth. # you will need to match the node-id pairs to rows in the summary table. # then, you can add a column to the dataframe, e.g., 'depth-from-soma', and # finally colour by that value as shown in vessel-skeleton-analysis.py edges = list(bfs_edges(junction_graph, source=soma_junction)) print(edges[:10]) napari.run()
jni/useful-histories
sholl-analysis.py
Python
bsd-3-clause
5,560
[ "NEURON", "Napari" ]
b1bd978c3de580ffb09200dafadfbb4c5ec7be2f1bf9b777944c75238e9ba74c
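A toy run of the shell-crossing bookkeeping from the Sholl part of the script, stripped of the skeleton machinery: the distance values here are made up, and np.add.at is used in place of fancy-index increment so that several crossings into the same shell by one path are all counted.

import numpy as np

shell_radii = np.linspace(0.0, 10.0, 6)          # shell boundary radii
distances = np.array([1.0, 2.5, 4.2, 4.4, 6.9])  # distances of path pixels to the center
shells = np.digitize(distances, shell_radii)     # shell bin per pixel
crossings = shells[np.flatnonzero(np.diff(shells))] - 1
counts = np.zeros_like(shell_radii)
np.add.at(counts, crossings, 1)                  # increment every crossed shell
print(counts)                                    # [1. 1. 1. 0. 0. 0.]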
#  Copyright (C) 2012-2016 Max Planck Institute for Polymer Research
#  Copyright (C) 2008-2011 Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

r"""
Abstract output class for LB simulations. The implemented realisations are:

* :class:`espressopp.analysis.LBOutputScreen` to output simulation progress
  and control flux conservation when using MD to LB coupling.
* :class:`espressopp.analysis.LBOutputVzInTime` to output velocity component
  :math:`v_z` on the lattice site with an index :math:`(0.25*N_i, 0, 0)` in time.
* :class:`espressopp.analysis.LBOutputVzOfX` to output local density :math:`\rho`
  and :math:`v_z` component of the velocity as a function of the coordinate :math:`x`.

.. Note:: all derived output classes have to be called from class
    :class:`espressopp.integrator.ExtAnalyze` with specified periodicity of
    invocation and after this added to the integrator. See examples.
"""

from espressopp.esutil import cxxinit
from espressopp import pmi

from espressopp.analysis.AnalysisBase import *
from _espressopp import analysis_LBOutput

class LBOutputLocal(AnalysisBaseLocal, analysis_LBOutput):
    # 'The (local) compute of LBOutput.'
    def writeOutput(self):
        if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.writeOutput(self)

if pmi.isController:
    class LBOutput(AnalysisBase):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            # cls = 'espressopp.analysis.LBOutputLocal',
            # pmicall = ["writeOutput"]
        )
kkreis/espressopp
src/analysis/LBOutput.py
Python
gpl-3.0
2,268
[ "ESPResSo" ]
8f9470a88cb29842256ce92d3fdfd8647d2496200c0bc2643679500379924475
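The docstring above says every LBOutput subclass is wrapped in espressopp.integrator.ExtAnalyze with a given invocation period and then added to the integrator. The pattern itself, reduced to plain Python so it runs standalone, looks like the sketch below; the class names are stand-ins, not the ESPResSo++ API.

class PeriodicAnalyzer:
    """Calls output.writeOutput() every `interval` integration steps."""
    def __init__(self, output, interval):
        self.output = output
        self.interval = interval

    def step(self, current_step):
        if current_step % self.interval == 0:
            self.output.writeOutput()

class DummyOutput:
    def writeOutput(self):
        print("flux / velocity profile written")

analyzer = PeriodicAnalyzer(DummyOutput(), interval=100)
for step in range(1, 301):
    analyzer.step(step)   # prints at steps 100, 200, 300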
#! /usr/bin/python
import cv2
import numpy as n
import sys
from math import *

Img = cv2.imread(sys.argv[1], 0)
#Img = n.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])

def convolve(Img, G):
    Height, Width = Img.shape
    Res = n.zeros((Height, Width))
    ICopy = n.zeros((Height+2, Width+2))
    # Note: despite the names, InsideW evaluates to Height+1 and InsideH to
    # Width+1 (ICopy.size/(Width+2) is the padded height), so the slices
    # below are consistent with the image shape.
    InsideW = int((ICopy.size/(Width+2))-1)
    InsideH = int((ICopy.size/(Height+2))-1)
    print(InsideW, InsideH)
    ICopy[1:InsideW, 1:InsideH] = Img
    Idx = [-1, 0, 1]
    for I in Idx:
        for J in Idx:
            Ix = I + 1
            Jx = J + 1
            print("Res:", Res)
            print("ICopy: ", ICopy[Ix:(InsideW+I), Jx:(InsideH+J)])
            Res += ICopy[Ix:(InsideW+I), Jx:(InsideH+J)] * G[Ix, Jx]
    return Res

def normalize(Img):
    Max = n.max(Img)
    Min = n.min(Img)
    if (Max > 255) or (Min < 0):
        Img = (Img - Min)/(Max - Min)
        # Scale by 255, not 256: at the maximum pixel, 256 would wrap
        # around to 0 under n.uint8.
        Img = Img * 255
        return n.uint8(Img)
    return Img

# Sobel
G = n.array([
    [ 1,  2,  1],
    [ 0,  0,  0],
    [-1, -2, -1]
])
Conv = convolve(Img, G)
Conv = normalize(Conv)

G = n.array([
    [ 1, 0, -1],
    [ 2, 0, -2],
    [ 1, 0, -1]
])
Conv = convolve(Conv, G)
Conv = normalize(Conv)

'''
# Sharpen
G = n.array([
    [ 0, -1,  0],
    [-1,  5, -1],
    [ 0, -1,  0]
])

# Gaussian
G = n.array([
    [ 1, 2, 1],
    [ 2, 4, 2],
    [ 1, 2, 1]
])

# Box blur
G = n.array([
    [ 1, 1, 1],
    [ 1, 1, 1],
    [ 1, 1, 1]
])

# Laplace
G = n.array([
    [ -1, -1, -1],
    [ -1,  8, -1],
    [ -1, -1, -1]
])

G = n.array([
    [ 0,  1, 0],
    [ 1, -4, 1],
    [ 0,  1, 0]
])

G = n.array([
    [ -1, 2, -1],
    [ -1, 2, -1],
    [ -1, 2, -1]
])
'''

print("Conv: ", Conv)
print("Img: ", Img)
print("Hist: ", n.histogram(Conv))
cv2.imshow("Img", Img)
cv2.imshow("Convoluted", n.uint8(Conv > 119)*255)
cv2.waitKey(0)
cv2.destroyAllWindows()
YuKill/UEMImplementation
ImageProcessing/Convolution.py
Python
unlicense
1,863
[ "Gaussian" ]
3df08e48f245d3c73bcff827ab93348e3f01d9ecf4c067007d13a4883f548f96
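A quick, self-contained sanity check of the shifted-slice accumulation that convolve() implements: on a constant image, a Sobel kernel (whose weights sum to zero) must give zero response everywhere except where the zero padding leaks into the window.

import numpy as np

img = np.full((4, 4), 7.0)
G = np.array([[ 1,  2,  1],
              [ 0,  0,  0],
              [-1, -2, -1]])

padded = np.zeros((6, 6))
padded[1:5, 1:5] = img
res = np.zeros((4, 4))
for i in (-1, 0, 1):
    for j in (-1, 0, 1):
        # Same indexing scheme as convolve(): shift the padded image and
        # accumulate one kernel weight at a time.
        res += padded[1 + i:5 + i, 1 + j:5 + j] * G[i + 1, j + 1]
print(res)   # middle rows are all 0; only the padded top/bottom rows respond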
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.

This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.

The class hierarchy is as follows:

    <your converter>
      [extends] converter.Base
        [extends] transformer.Base
          [extends] gast.NodeTransformer
        [uses] transformer.SourceInfo
      [uses] converter.EntityContext
        [uses] converter.ProgramContext
        [uses] transformer.SourceInfo

converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the
corresponding EntityContext object (see below). Note that converters are not
reusable, and `visit` will raise an error if called more than once.

converter.EntityContext contains mutable state associated with an entity that
the converter processes.

converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.

Below is the overall flow at conversion:

    program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
    while <program_ctx has more entities to convert>:
      entity, source_info = <get next entity from program_ctx>
      entity_ctx = EntityContext(program_ctx, source_info)
      for <each ConverterClass>:
        converter = ConverterClass(entity_ctx)
        # May update entity_ctx and program_ctx
        entity = converter.visit(entity)
      <add entity's dependencies to program_ctx>

Note that pyct contains a small number of transformers used for static
analysis. These implement transformer.Base, rather than converter.Base, to
avoid a dependency on AutoGraph.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
from enum import Enum

from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info

# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.

# TODO(mdan): Add a test specific to this converter.
class ProgramContext(object): """ProgramContext keeps track of converting function hierarchies. This object is mutable, and is updated during conversion. Not thread safe. Attributes: recursive: bool, whether to recursively convert any functions that the decorator function may call. autograph_decorators: Tuple[Callable, ...], decorator functions that belong to AutoGraph. These require special treatment. dependency_cache: Dict[Any, ast.AST], the original entities mapped to their converted AST additional_imports: Set[Any], additional entities which for any reason cannot be attached after loading and need to be explicitly imported in the generated code name_map: Dict[str, str], map of original entity name to the name of their converted counterparts autograph_module: Module, a reference to the autograph module. This needs to be specified by the caller to avoid circular dependencies. uncompiled_modules: Set[Tuple[str, ...]], with each tuple representing the fully qualified name of a package containing functions that will not be compiled. required_imports: str, containing an import statement on each line. These are all the imports necessary for the compiled code to run, in addition to the closures of each entity, which are attached dynamically. """ def __init__( self, recursive, autograph_decorators, partial_types, autograph_module, uncompiled_modules, ): self.recursive = recursive self.autograph_decorators = autograph_decorators self.partial_types = partial_types if partial_types else () self.autograph_module = autograph_module self.uncompiled_modules = uncompiled_modules # Required to output dependencies in discovery order, which should match # the reverse dependency order. self.dependency_cache = collections.OrderedDict() self.additional_imports = set() self.name_map = {} @property def required_imports(self): """Returns a block containing all imports required by the converted code.""" # TODO(mdan): Check that these don't clobber one another. return '\n'.join(config.COMPILED_IMPORT_STATEMENTS + tuple(self.additional_imports)) def new_namer(self, namespace): return naming.Namer(namespace, self.recursive, self.name_map, self.partial_types) def update_name_map(self, namer): """Updates renamed_calls based on the recent activity from the namer. Whenever we convert a new entity, any references to other entities are being renamed to match their soon-to-be-converted counterparts. The namer keeps track of these renames. When conversion is complete, we copy those renames so that when those referenced entities are being converted, their new name matches. Args: namer: naming.Namer Raises: ValueError: when an entity was renamed twice and to different names. """ # TODO(mdan): Have call_trees do this directly. # This is done so indirectly, via the namer, for historic reasons. But # now we can have the converter that does the rename record the new name # as well and skip this step altogether. for o, name in namer.renamed_calls.items(): if o in self.name_map: if self.name_map[o] != name: raise ValueError( 'Calls to %s were converted using multiple names (%s). This is ' 'possible when an entity with one of these names already ' 'existed. To fix, avoid using any of these names.' % (o, (name, self.name_map[o]))) else: self.name_map[o] = name def add_to_cache(self, original_entity, converted_ast): self.dependency_cache[original_entity] = converted_ast class EntityContext(object): """Tracks the conversion of a single entity. This object is mutable, and is updated during conversion. Not thread safe. 
Attributes: namer: Namer info: transformer.EntityInfo program: ProgramContext """ def __init__(self, namer, entity_info, program_ctx): self.namer = namer self.info = entity_info self.program = program_ctx class Base(transformer.Base): """All converters should inherit from this class. Attributes: ctx: EntityContext """ def __init__(self, ctx): super(Base, self).__init__(ctx.info) self.ctx = ctx # Keeping this short because it's used frequently. self._used = False self._ast_depth = 0 def get_definition_directive(self, node, directive, arg, default): """Returns the unique directive for a symbol, or a default if none exist. See lang/directives.py for details on directives. Args: node: ast.AST directive: Callable[..., Any] arg: str default: Any Raises: ValueError: if conflicting annotations have been found """ defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ()) if not defs: return default # TODO(mdan): Simplify this. arg_values = [] for def_ in defs: if (directive not in def_.directives or arg not in def_.directives[directive]): continue arg_value = def_.directives[directive][arg] for prev_value in arg_values: if not ast_util.matches(arg_value, prev_value): qn = anno.getanno(node, anno.Basic.QN) raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' % (qn, directive.__name__, arg, compiler.ast_to_source(arg_value).strip(), compiler.ast_to_source(prev_value).strip())) arg_values.append(arg_value) if not arg_values: return default arg_value, = arg_values return arg_value def visit(self, node): if not self._ast_depth: if self._used: raise ValueError('converter objects cannot be reused') self._used = True self._ast_depth += 1 try: return super(Base, self).visit(node) finally: self._ast_depth -= 1 class AnnotatedDef(reaching_definitions.Definition): def __init__(self): super(AnnotatedDef, self).__init__() self.directives = {} class AgAnno(Enum): """Annotation labels specific to AutoGraph. See anno.py.""" DIRECTIVES = 'User directives associated with the annotated statement.' def __repr__(self): return self.name def standard_analysis(node, context, is_initial=False): """Performs a complete static analysis of the given code. Args: node: ast.AST context: converter.EntityContext is_initial: bool, whether this is the initial analysis done on the input source code Returns: ast.AST, same as node, with the static analysis annotations added """ # TODO(mdan): Clear static analysis here. # TODO(mdan): Consider not running all analyses every time. # TODO(mdan): Don't return a node because it's modified by reference. graphs = cfg.build(node) node = qual_names.resolve(node) node = activity.resolve(node, context.info, None) node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef) node = liveness.resolve(node, context.info, graphs) node = live_values.resolve(node, context.info, config.PYTHON_LITERALS) node = type_info.resolve(node, context.info) # This second call allows resolving first-order class attributes. node = live_values.resolve(node, context.info, config.PYTHON_LITERALS) if is_initial: anno.dup( node, { anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS, }, ) return node def apply_(node, context, converter_module): """Applies a converter to an AST. Args: node: ast.AST context: converter.EntityContext converter_module: converter.Base Returns: ast.AST, the result of applying converter to node """ node = standard_analysis(node, context) node = converter_module.transform(node, context) return node
AnishShah/tensorflow
tensorflow/python/autograph/core/converter.py
Python
apache-2.0
11,712
[ "VisIt" ]
d3e4316578cfad81274daf0d37b332e980d8dc899bd5b0b4f1676b051e6f6466
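The conversion loop sketched in the module docstring, visiting the entity with each converter in turn, has the same shape as chaining ast.NodeTransformer passes. A minimal standalone analogue using only the standard library (requires Python >= 3.9 for ast.unparse; the transformer class is illustrative, not an actual AutoGraph converter):

import ast

class NegateConstants(ast.NodeTransformer):
    """Toy pass: replaces every numeric literal with its negation."""
    def visit_Constant(self, node):
        if isinstance(node.value, (int, float)):
            return ast.copy_location(ast.Constant(-node.value), node)
        return node

tree = ast.parse("x = 1 + 2")
for converter in (NegateConstants(),):   # converters applied in sequence
    tree = converter.visit(tree)
ast.fix_missing_locations(tree)
print(ast.unparse(tree))   # x = -1 + -2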
############################################################################### ## ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the University of Utah nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### ############################################################################## # Transfer Function Widget for VTK from PyQt4 import QtCore, QtGui from gui.modules.constant_configuration import ConstantWidgetMixin from core.modules.basic_modules import new_constant, init_constant, Module from core.modules.module_registry import get_module_registry from core.system import get_elementtree_library ElementTree = get_elementtree_library() from core.utils.color import ColorByName import vtk import math import pickle import copy ################################################################################ # etc def clamp(v, mn, mx, eps=0.0): mne = mn + eps mxe = mx - eps if v < mne: return mn if v > mxe: return mx return v # Because of a Qt bug see # http://bugreports.qt.nokia.com/browse/QTBUG-17985 # We cannot set the scene from 0 to 1. In this case we will set it # 4000 x 4000 with GLOBAL_SCALE. 
When the bug is fixed, just set it to 1.0 GLOBAL_SCALE = 4000.0 ############################################################################## # Transfer Function object class TransferFunction(object): def __init__(self): self._min_range = 0.0 self._max_range = 1.0 self._pts = [] def set_range(self, mn, mx): self._min_range = mn self._max_range = mx def set_on_vtk_volume_property(self, vtk_volume_property): # Builds the opacity and color functions of = vtk.vtkPiecewiseFunction() cf = vtk.vtkColorTransferFunction() vp = vtk_volume_property for pt in self._pts: (scalar, opacity, color) = pt # Map scalar to tf range s = self._min_range + (self._max_range - self._min_range) * scalar of.AddPoint(s, opacity) cf.AddRGBPoint(s, color[0], color[1], color[2]) vp.SetScalarOpacity(of) vp.SetColor(cf) def get_vtk_transfer_functions(self): of = vtk.vtkPiecewiseFunction() cf = vtk.vtkColorTransferFunction() for pt in self._pts: (scalar, opacity, color) = pt # Map scalar to tf range s = self._min_range + (self._max_range - self._min_range) * scalar of.AddPoint(s, opacity) cf.AddRGBPoint(s, color[0], color[1], color[2]) return (of,cf) def add_point(self, scalar, opacity, color): self._pts.append((scalar, opacity, color)) self._pts.sort() def get_value(self, scalar): """get_value(scalar): returns the opacity and color linearly interpolated at the value. Useful for adding knots.""" ix = 0 while ix < len(self._pts) and self._pts[ix][0] > scalar: ix += 1 if ix == 0: return (self._pts[0][1], self._pts[0][2]) elif ix == len(self._pts): return (self._pts[-1][1], self._pts[-1][2]) else: u = ((self._pts[ix][0] - scalar) / (self._pts[ix][0] - self._pts[ix-1][0])) do = self._pts[ix][1] - self._pts[ix-1][1] dr = self._pts[ix][2][0] - self._pts[ix-1][2][0] dg = self._pts[ix][2][1] - self._pts[ix-1][2][1] db = self._pts[ix][2][2] - self._pts[ix-1][2][2] return (self._pts[ix-1][1] + u * do, (self._pts[ix-1][2][0] + u * dr, self._pts[ix-1][2][1] + u * dg, self._pts[ix-1][2][2] + u * db)) def __copy__(self): result = TransferFunction() result._min_range = self._min_range result._max_range = self._max_range result._pts = copy.copy(self._pts) return result def __eq__(self, other): if type(other) != type(self): return False if self._min_range != other._min_range: return False if self._max_range != other._max_range: return False for my_pt, other_pt in zip(self._pts, other._pts): if my_pt != other_pt: return False return True def __ne__(self, other): return not self.__eq__(other) def serialize(self, node=None): """serialize(node: ElementTree.Element) -> str Convert this object to an XML representation in string format. 
""" if node is None: node = ElementTree.Element('transfer_function') node.set('min_range', str(self._min_range)) node.set('max_range', str(self._max_range)) for pt in self._pts: ptNode = ElementTree.SubElement(node, 'point') ptNode.set('scalar', str(pt[0])) ptNode.set('opacity', str(pt[1])) color = pt[2] colorNode = ElementTree.SubElement(ptNode, 'color') colorNode.set('R', str(color[0])) colorNode.set('G', str(color[1])) colorNode.set('B', str(color[2])) return ElementTree.tostring(node) @staticmethod def parse(strNode): """parse(strNode: str) -> TransferFunction Parses a string representing a TransferFunction and returns a TransferFunction object """ try: node = ElementTree.fromstring(strNode) except SyntaxError: #it was serialized using pickle tf = pickle.loads(strNode.decode('hex')) tf._pts.sort() return tf if node.tag != 'transfer_function': return None #read attributes tf = TransferFunction() tf._min_range = float(node.get('min_range', "0.0")) tf._max_range = float(node.get('max_range', "1.0")) for ptNode in node.getchildren(): if ptNode.tag == 'point': scalar = float(ptNode.get('scalar','-1.0')) opacity = float(ptNode.get('opacity', '1.0')) for colorNode in ptNode.getchildren(): if colorNode.tag == 'color': color = (float(colorNode.get('R','0.0')), float(colorNode.get('G','0.0')), float(colorNode.get('B','0.0'))) break tf._pts.append((scalar,opacity,color)) tf._pts.sort() return tf ############################################################################## # Graphics Items class TransferFunctionPoint(QtGui.QGraphicsEllipseItem): selection_pens = { True: QtGui.QPen(QtGui.QBrush( QtGui.QColor(*(ColorByName.get_int('goldenrod_medium')))),GLOBAL_SCALE * 0.012), False: QtGui.QPen() } def __init__(self, scalar, opacity, color, parent=None): QtGui.QGraphicsEllipseItem.__init__(self, parent) self._scalar = scalar self._opacity = opacity self._color = QtGui.QColor(color[0]*255, color[1]*255, color[2]*255) self.setPen(QtGui.QPen(QtGui.QColor(0,0,0))) self.setFlag(QtGui.QGraphicsItem.ItemIsMovable) self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable) self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable) if QtCore.QT_VERSION >= 0x40600: self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges) self.setZValue(2.0) self._sx = 1.0 self._sy = 1.0 # fixed scale self._fsx = GLOBAL_SCALE self._fsy = GLOBAL_SCALE self._left_line = None self._right_line = None self._point = QtCore.QPointF(scalar * self._fsx, opacity * self._fsy) self.refresh() self.setToolTip("Double-click to change color\n" "Right-click to remove point\n" "Scalar: %.5f, Opacity: %.5f" % (self._scalar, self._opacity)) # This sets up the linked list of Lines def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_Backspace or \ event.key() == QtCore.Qt.Key_Delete: self.remove_self() def refresh(self): dx = self._fsx * 0.025 / self._sx dy = self._fsy * 0.025/ self._sy # this is the setup self.setBrush(QtGui.QBrush(self._color)) self.setRect(-dx, -dy, 2 * dx, 2 * dy) self.setPos(self._fsx * self._scalar, self._fsy * self._opacity) self.update() def update_scale(self, sx, sy): self._sx = sx self._sy = sy self.refresh() def itemChange(self, change, value): if change == QtGui.QGraphicsItem.ItemSelectedChange: self.setPen(self.selection_pens[value.toBool()]) if change == QtGui.QGraphicsItem.ItemPositionChange: # moves point pt = value.toPointF() pt.setY(clamp(pt.y(), 0.0, 1.0 * self._fsy) ) self._opacity = pt.y() / self._fsy self._point.setY(pt.y()) if not self._left_line: pt.setX(0.0) elif not self._right_line: pt.setX(1.0 * self._fsx) 
else: assert self._left_line._point_right == self assert self._right_line._point_left == self pt.setX(clamp(pt.x(), self._left_line._point_left._point.x(), self._right_line._point_right._point.x(), 1e-6)) self._point.setX(pt.x()) self._scalar = pt.x() / self._fsx if self._left_line: self._left_line.refresh() if self._right_line: self._right_line.refresh() if self.scene(): self.scene()._tf_poly.setup() self.setToolTip("Double-click to change color\n" "Right-click to remove point\n" "Scalar: %.5f, Opacity: %.5f" % (self._scalar, self._opacity)) return QtGui.QGraphicsItem.itemChange(self, change, QtCore.QVariant(pt)) return QtGui.QGraphicsItem.itemChange(self, change, value) def remove_self(self): if not self._left_line or not self._right_line: # Ignore, self is a corner node that can't be removed return # Removes the right line and self, re-ties data structure self._left_line._point_right = self._right_line._point_right self._left_line._point_right._left_line = self._left_line # be friends with garbage collector self._right_line._point_left = None self._right_line._point_right = None self.scene()._tf_poly.setup() self.scene().removeItem(self._right_line) self.scene().removeItem(self) self._left_line.refresh() def mouseDoubleClickEvent(self, event): new_color = QtGui.QColorDialog.getColor(self._color) if not new_color.isValid(): return self._color = new_color if self._left_line: self._left_line.refresh() if self._right_line: self._right_line.refresh() self.refresh() self.scene()._tf_poly.setup() QtGui.QGraphicsEllipseItem.mouseDoubleClickEvent(self, event) def mousePressEvent(self, event): if event.button() == QtCore.Qt.RightButton: event.accept() self.remove_self() else: QtGui.QGraphicsEllipseItem.mousePressEvent(self, event) def paint(self, painter, option, widget=None): """ paint(painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None Peform painting of the point without the ugly default dashed-line black square """ painter.setBrush(self.brush()) painter.setPen(self.pen()) painter.drawEllipse(self.rect()) def add_self_to_transfer_function(self, tf): tf.add_point(self._scalar, self._opacity, (self._color.redF(), self._color.greenF(), self._color.blueF())) class TransferFunctionPolygon(QtGui.QGraphicsPolygonItem): def __init__(self): QtGui.QGraphicsPolygonItem.__init__(self) def setup(self): # This inspects the scene, finds the left-most point, and # then builds the polygon traversing the linked list structure if not self.scene(): return pt = self.scene().get_leftmost_point() first_pt = pt self.setZValue(1.25) g = QtGui.QLinearGradient() g.setStart(0.0, 0.5) g.setFinalStop(1.0, 0.5) g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) p = QtGui.QPen() p.setStyle(QtCore.Qt.NoPen) pts = [QtCore.QPointF(pt.x(), 0)] self.setPen(p) while 1: c = QtGui.QColor(pt._color) c.setAlphaF(pt._opacity) g.setColorAt(pt._scalar, c) pts.append(pt._point) # move cursor fwd if pt._right_line: pt = pt._right_line._point_right else: break self.setBrush(QtGui.QBrush(g)) pts.append(QtCore.QPointF(pt._point.x(), 0)) polygon = QtGui.QPolygonF(pts) self.setPolygon(polygon) class TransferFunctionLine(QtGui.QGraphicsPolygonItem): def __init__(self, point_left, point_right, parent=None): assert point_right._scalar >= point_left._scalar QtGui.QGraphicsPolygonItem.__init__(self, parent) self._point_left = point_left self._point_right = point_right self._point_left._right_line = self self._point_right._left_line = self self.setup(1.0, 1.0) self._sx = 1.0 self._sy = 1.0 # fixed scale self._fsx = 
GLOBAL_SCALE self._fsy = GLOBAL_SCALE self.setToolTip('') def setup(self, sx, sy): d = self._point_right._point - self._point_left._point d_normal = QtCore.QPointF(d.y(), -d.x()) l = math.sqrt(d.x() * d.x() + d.y() * d.y()) if l != 0.0: d_normal /= l d_normal *= GLOBAL_SCALE * 0.010 d_normal.setX(d_normal.x() / sx) d_normal.setY(d_normal.y() / sy) ps = [self._point_left._point + d_normal, self._point_right._point + d_normal, self._point_right._point - d_normal, self._point_left._point - d_normal] self.setPolygon(QtGui.QPolygonF(ps)) self.setZValue(1.5) # Gradient for filling g = QtGui.QLinearGradient() g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) g.setStart(self._point_left._scalar, self._point_left._opacity) g.setFinalStop(self._point_right._scalar, self._point_right._opacity) g.setColorAt(0.0, self._point_left._color) g.setColorAt(1.0, self._point_right._color) self.setBrush(QtGui.QBrush(g)) # Gradient for outlining g = QtGui.QLinearGradient() g.setStart(self._point_left._point) g.setFinalStop(self._point_right._point) dark_pl = QtGui.QColor(self._point_left._color.red() * 0.5, self._point_left._color.green() * 0.5, self._point_left._color.blue() * 0.5) dark_pr = QtGui.QColor(self._point_right._color.red() * 0.5, self._point_right._color.green() * 0.5, self._point_right._color.blue() * 0.5) g.setColorAt(0.0, dark_pl) g.setColorAt(1.0, dark_pr) p = QtGui.QPen() p.setBrush(QtGui.QBrush(g)) self.setPen(p) def update_scale(self, sx, sy): self._sx = sx self._sy = sy self.refresh() def refresh(self): self.setup(self._sx, self._sy) def mouseDoubleClickEvent(self, event): p = event.scenePos() c_left = self._point_left._color c_right = self._point_right._color u = ((p.x() - self._point_left._point.x()) / (self._point_right._point.x() - self._point_left._point.x())) new_c = (u * c_right.redF() + (1-u) * c_left.redF(), u * c_right.greenF() + (1-u) * c_left.greenF(), u * c_right.blueF() + (1-u) * c_left.blueF()) new_point = TransferFunctionPoint(p.x()/ self._fsx, p.y()/self._fsy, new_c) new_line = TransferFunctionLine(new_point, self._point_right) new_point._left_line = self self._point_right = new_point self.scene().addItem(new_line) self.scene().addItem(new_point) new_line.update_scale(self._point_left._sx, self._point_left._sy) new_point.update_scale(self._point_left._sx, self._point_left._sy) new_point.refresh() self.refresh() def mousePressEvent(self, event): # This needs to be here, otherwise mouseDoubleClickEvent does # not get called. 
event.accept() ############################################################################## # Scene, view, widget class TransferFunctionScene(QtGui.QGraphicsScene): def __init__(self, tf, parent=None): QtGui.QGraphicsScene.__init__(self, parent) self._tf_items = [] poly = TransferFunctionPolygon() poly.setup() self._tf_poly = poly self.addItem(poly) self.create_tf_items(tf) self._tf_poly.setup() #current scale self._sx = 1.0 self._sy = 1.0 # Add outlines line_color = QtGui.QColor(200, 200, 200) pen = QtGui.QPen(line_color) ps = [QtCore.QPointF(0.0, 0.0), QtCore.QPointF(GLOBAL_SCALE, 0.0), QtCore.QPointF(GLOBAL_SCALE, GLOBAL_SCALE), QtCore.QPointF(0.0, GLOBAL_SCALE)] outline = QtGui.QPolygonF(ps) self.addPolygon(outline, pen) for i in xrange(51): u = GLOBAL_SCALE * float(i) / 50.0 self.addLine(QtCore.QLineF(u, 0.0, u, GLOBAL_SCALE), pen) self.addLine(QtCore.QLineF(0.0, u, GLOBAL_SCALE, u), pen) def reset_transfer_function(self, tf): self.create_tf_items(tf) self.update_scale(self._sx, self._sy) self._tf_poly.setup() def removeItem(self, item): if item in self._tf_items: self._tf_items.remove(item) QtGui.QGraphicsScene.removeItem(self, item) def addItem(self, item): # Ugly, but hey if isinstance(item, TransferFunctionLine) or \ isinstance(item, TransferFunctionPoint): self._tf_items.append(item) QtGui.QGraphicsScene.addItem(self, item) def create_tf_items(self, tf): items = copy.copy(self._tf_items) for item in items: self.removeItem(item) self._tf_items = [] if len(tf._pts) == 0: pt_left = TransferFunctionPoint(0.0, 0.0, (0.0, 0.0, 0.0)) pt_right = TransferFunctionPoint(1.0, 0.0, (0.0, 0.0, 0.0)) line = TransferFunctionLine(pt_left, pt_right) self.addItem(pt_left) self.addItem(pt_right) self.addItem(line) else: pts = [TransferFunctionPoint(*pt) for pt in tf._pts] lines = [TransferFunctionLine(pt_l, pt_r) for (pt_l, pt_r) in zip(pts[:-1], pts[1:])] for pt in pts: self.addItem(pt) for line in lines: self.addItem(line) def add_knot(self, scalar, opacity): pass def update_scale(self, sx, sy): for item in self._tf_items: item.update_scale(sx, sy) self._sx = sx self._sy = sy def get_leftmost_point(self): pt = None for item in self._tf_items: if hasattr(item, '_left_line') and not item._left_line: pt = item break assert pt return pt def get_transfer_function(self): result = TransferFunction() pt = self.get_leftmost_point() while 1: pt.add_self_to_transfer_function(result) if pt._right_line: pt = pt._right_line._point_right else: break return result class TransferFunctionView(QtGui.QGraphicsView): def __init__(self, parent=None): QtGui.QGraphicsView.__init__(self, parent) self.setRenderHint(QtGui.QPainter.Antialiasing) self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) def resizeEvent(self, event): self.resetMatrix() self.setMatrix(QtGui.QMatrix(event.size().width() / (GLOBAL_SCALE *10.0/9) , 0, 0, -event.size().height() / (GLOBAL_SCALE*10.0/9), 0, 0)) self.scene().update_scale(event.size().width()/(2000.0/9), event.size().height()/(2000.0/9)) def focusOutEvent(self, event): self.parent().update_parent() QtGui.QGraphicsView.focusOutEvent(self, event) default_tf = TransferFunction() default_tf.add_point(0.0, 0.0, (0.0, 0.0, 0.0)) default_tf.add_point(1.0, 0.0, (0.0, 0.0, 0.0)) class TransferFunctionWidget(QtGui.QWidget, ConstantWidgetMixin): def __init__(self, param, parent=None): QtGui.QWidget.__init__(self, parent) ConstantWidgetMixin.__init__(self, param.strValue) if not param.strValue: self._tf = copy.copy(default_tf) 
else: self._tf = TransferFunction.parse(param.strValue) self._scene = TransferFunctionScene(self._tf, self) layout = QtGui.QVBoxLayout() self.setLayout(layout) self._view = TransferFunctionView(self) self._view.setScene(self._scene) self._view.setMinimumSize(200,200) self._view.setMaximumHeight(280) self._view.show() self._view.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) self._view.setMatrix(QtGui.QMatrix(1, 0, 0, -1, 0, 0)) self.setMinimumSize(260,240) caption = QtGui.QLabel("Double-click on the line to add a point") font = QtGui.QFont('Arial', 11) font.setItalic(True) caption.setFont(font) layout.addWidget(self._view) layout.addWidget(caption) def contents(self): return self._scene.get_transfer_function().serialize() def setContents(self, strValue, silent=True): if not strValue: self._tf = copy.copy(default_tf) else: self._tf = TransferFunction.parse(strValue) self._scene.reset_transfer_function(self._tf) if not silent: self.update_parent() ############################################################################## # Helper module to adjust range class vtkScaledTransferFunction(Module): def compute(self): reg = get_module_registry() tf = self.getInputFromPort('TransferFunction') new_tf = copy.copy(tf) if self.hasInputFromPort('Input'): port = self.getInputFromPort('Input') algo = port.vtkInstance.GetProducer() output = algo.GetOutput(port.vtkInstance.GetIndex()) (new_tf._min_range, new_tf._max_range) = output.GetScalarRange() elif self.hasInputFromPort('Dataset'): algo = self.getInputFromPort('Dataset').vtkInstance output = algo (new_tf._min_range, new_tf._max_range) = output.GetScalarRange() else: (new_tf._min_range, new_tf._max_range) = self.getInputFromPort('Range') self.setResult('TransferFunction', new_tf) (of,cf) = new_tf.get_vtk_transfer_functions() of_module = reg.get_descriptor_by_name('edu.utah.sci.vistrails.vtk', 'vtkPiecewiseFunction').module() of_module.vtkInstance = of cf_module = reg.get_descriptor_by_name('edu.utah.sci.vistrails.vtk', 'vtkColorTransferFunction').module() cf_module.vtkInstance = cf self.setResult('vtkPicewiseFunction', of_module) self.setResult('vtkColorTransferFunction', cf_module) string_conversion = staticmethod(lambda x: x.serialize()) conversion = staticmethod(lambda x: TransferFunction.parse(x)) validation = staticmethod(lambda x: isinstance(x, TransferFunction)) TransferFunctionConstant = new_constant('TransferFunction', conversion, default_tf, validation, TransferFunctionWidget) TransferFunctionConstant.translate_to_string = string_conversion ############################################################################## def initialize(): init_constant(TransferFunctionConstant) ############################################################################## import unittest class TestTransferFunction(unittest.TestCase): def test_serialization(self): tf = TransferFunction() tf._min_range = 0.1 tf._max_range = 2.0 tf._pts.append((0.3,0.5,(1.0,1.0,1.0))) tf._pts.append((0.6,0.7,(1.0,0.5,1.0))) tf._pts.append((0.2,0.8,(1.0,0.0,1.0))) tf._pts.sort() #simulate old serialization method ser1 = pickle.dumps(tf).encode('hex') ser2 = tf.serialize() tf1 = TransferFunction.parse(ser1) tf2 = TransferFunction.parse(ser2) assert tf == tf1 assert tf == tf2 assert tf1 == tf2 if __name__ == "__main__": unittest.main()
CMUSV-VisTrails/WorkflowRecommendation
vistrails/packages/vtk/tf_widget.py
Python
bsd-3-clause
28,144
[ "VTK" ]
7885d144da1bc9ebe8ef847df80153daac18a7c4c3d829e0228d35208dd7e9e3
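The VisTrails widget above keeps the transfer function as a chain of point items linked by line items, and rebuilds a serializable TransferFunction by walking right from the leftmost point. Below is a minimal, Qt-free sketch of that data model; the SimpleTransferFunction name and the linear opacity interpolation are illustrative assumptions, not the VisTrails implementation.

# Minimal sketch of a piecewise-linear transfer function: sorted
# (scalar, opacity, (r, g, b)) knots with linear opacity interpolation.
from bisect import bisect_right


class SimpleTransferFunction(object):

    def __init__(self):
        self._pts = []  # (scalar, opacity, (r, g, b)) knots, sorted by scalar

    def add_point(self, scalar, opacity, color):
        self._pts.append((scalar, opacity, color))
        self._pts.sort()

    def opacity(self, scalar):
        xs = [p[0] for p in self._pts]
        i = bisect_right(xs, scalar)
        if i == 0:                       # left of the first knot
            return self._pts[0][1]
        if i == len(self._pts):          # right of the last knot
            return self._pts[-1][1]
        x0, y0, _ = self._pts[i - 1]
        x1, y1, _ = self._pts[i]
        t = (scalar - x0) / float(x1 - x0)
        return y0 + t * (y1 - y0)


tf = SimpleTransferFunction()
tf.add_point(0.0, 0.0, (0.0, 0.0, 0.0))
tf.add_point(1.0, 1.0, (1.0, 1.0, 1.0))
assert abs(tf.opacity(0.5) - 0.5) < 1e-9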
import os import shutil import json import logging from pychemia.code.vasp import VaspJob, VaspOutput, VaspInput from pychemia.crystal import KPoints from pychemia.code.vasp import read_poscar from pychemia.utils.mathematics import round_small __author__ = 'Guillermo Avendano-Franco' class RelaxPopulation: def __init__(self, population, basedir, target_force=1E-2, target_stress=1E-2): self.population = population self.basedir = basedir self.vasp_jobs = {} self.runs = {} self.runner = None self.status = {} self.target_force = target_force self.target_stress = target_stress def create_dirs(self, clean=False): if not os.path.isdir(self.basedir): os.makedirs(self.basedir) elif clean: for i in os.listdir(self.basedir): shutil.rmtree(self.basedir + os.sep + i) for i in self.population.pcdb.entries.find(): name = self.basedir + os.sep + str(i['_id']) if not os.path.isdir(name): os.mkdir(name) def create_inputs(self, kp_density=10000, encut=1.0): # kpoints = KPoints(kmode='gamma', grid=[4, 4, 4]) for entry in self.population.pcdb.entries.find(): name = str(entry['_id']) workdir = self.basedir + os.sep + name structure = self.population.db.get_structure(entry['_id']) kpoints = KPoints.optimized_grid(structure.lattice, kp_density=kp_density) print(kpoints) vj = VaspJob(workdir=workdir) vj.initialize(structure=structure, kpoints=kpoints) inp = VaspInput() inp.set_rough_relaxation() vj.set_input_variables(inp) vj.write_potcar() vj.input_variables.set_encut(ENCUT=encut, POTCAR=workdir + os.sep + 'POTCAR') vj.set_inputs() self.vasp_jobs[name] = vj self.runs[name] = 0 self.status[name] = ['ACTIVE'] def add_status(self, entry_id, value): if value not in self.status[entry_id]: self.status[entry_id].append(value) def del_status(self, entry_id, value): if value in self.status[entry_id]: self.status[entry_id].remove(value) def flip_status(self, entry_id, oldvalue, newvalue): self.del_status(entry_id, oldvalue) self.add_status(entry_id, newvalue) def modify_input(self, entry_id): if 'RELAXED' not in self.status[entry_id] and 'NOPROCAR' not in self.status[entry_id] \ and 'NOOUTCAR' not in self.status[entry_id]: return True else: return False def update(self, workdir): """ This routine determines how to proceed with the relaxation for one specific work directory :param workdir: (str) String representation of the id in the mongodb :return: """ # workdir = self.basedir + os.sep + entry_id entry_id = os.path.basename(workdir) vj = self.vasp_jobs[entry_id] runj = self.runs[entry_id] if os.path.isfile(workdir + os.sep + 'OUTCAR'): vj.get_outputs() self.update_history(entry_id) if os.path.isfile(workdir + os.sep + 'RELAXED'): self.add_status(entry_id, 'RELAXED') elif not os.path.isfile(workdir + os.sep + 'PROCAR'): self.add_status(entry_id, 'NOPROCAR') else: self.del_status(entry_id, 'NOPROCAR') if not os.path.isfile(workdir + os.sep + 'OUTCAR'): self.add_status(entry_id, 'NOOUTCAR') else: self.del_status(entry_id, 'NOOUTCAR') print('-') vo = VaspOutput(workdir + os.sep + 'OUTCAR') relaxation_info = vo.relaxation_info() if len(relaxation_info) != 3: print('[' + str(entry_id) + ']' + ' Missing some data in OUTCAR (forces or stress)') self.add_status(entry_id, 'NOOUTCAR') print('[' + str(entry_id) + ']' + 'Results:') for i in relaxation_info: print('[' + str(entry_id) + '] %20s %12.5e' % (i, relaxation_info[i])) # Conditions to consider the structure relaxed if relaxation_info['avg_force'] < self.target_force: if relaxation_info['avg_stress_diag'] < self.target_stress: if relaxation_info['avg_stress_non_diag'] < 
self.target_stress: wf = open(workdir + os.sep + 'RELAXED', 'w') for i in relaxation_info: wf.write("%15s %12.3f" % (i, relaxation_info[i])) wf.close() wf = open(workdir + os.sep + 'COMPLETE', 'w') for i in relaxation_info: wf.write("%15s %12.3f" % (i, relaxation_info[i])) wf.close() self.add_status(entry_id, 'RELAXED') if self.modify_input(entry_id): # How to change ISIF if relaxation_info['avg_force'] < 0.1: if relaxation_info['avg_stress_diag'] < 0.1: if relaxation_info['avg_stress_non_diag'] < 0.1: vj.input_variables.variables['ISIF'] = 3 else: vj.input_variables.variables['ISIF'] = 3 else: vj.input_variables.variables['ISIF'] = 3 else: vj.input_variables.variables['ISIF'] = 2 # How to change IBRION # if info['avg_force'] < 0.1 and info['avg_stress_diag'] < 0.1 and info['avg_stress_non_diag'] < 0.1: # vj.input_variables.variables['IBRION'] = 1 # elif info['avg_force'] < 1 and info['avg_stress_diag'] < 1 and info['avg_stress_non_diag'] < 1: # vj.input_variables.variables['IBRION'] = 2 # else: # vj.input_variables.variables['IBRION'] = 3 # How to change EDIFF if vj.input_variables.variables['EDIFF'] > 2 * 1E-4: vj.input_variables.variables['EDIFF'] = round_small(vj.input_variables.variables['EDIFF'] / 2) else: vj.input_variables.variables['EDIFF'] = 1E-4 # How to change EDIFFG if vj.input_variables.variables['EDIFFG'] < - 2 * self.target_force: vj.input_variables.variables['EDIFFG'] = round_small(vj.input_variables.variables['EDIFFG'] / 2) else: vj.input_variables.variables['EDIFFG'] = - self.target_force # Print new values print('[' + str(entry_id) + ']' + 'New Values:') for i in ['ISIF', 'IBRION', 'EDIFF', 'EDIFFG']: print('[' + str(entry_id) + ']' + i + ' : ', vj.input_variables.variables[i]) print('-') for i in ['OUTCAR']: if not os.path.exists(workdir + os.sep + i): wf = open(workdir + os.sep + i, 'w') wf.write('') wf.close() log = logging.handlers.RotatingFileHandler(workdir + os.sep + i, maxBytes=1, backupCount=1000) log.doRollover() try: vj.structure = read_poscar(workdir + os.sep + 'CONTCAR') except ValueError: print('Error reading CONTCAR') vj.set_inputs() properties = vj.outcar status = self.status[entry_id] newentry = self.population.db.update(entry_id, structure=vj.structure, properties=properties, status=status) vj.save_json(workdir + os.sep + 'vaspjob.json') wf = open(workdir + os.sep + 'entry.json', 'w') json.dump(newentry, wf, sort_keys=True, indent=4, separators=(',', ': ')) wf.close() return True else: vj.set_inputs() status = self.status[entry_id] newentry = self.population.db.update(entry_id, structure=vj.structure, status=status) vj.save_json(workdir + os.sep + 'vaspjob.json') wf = open(workdir + os.sep + 'entry.json', 'w') json.dump(newentry, wf, sort_keys=True, indent=4, separators=(',', ': ')) wf.close() return True def update_history(self, entry_id): filename = 'pychemia_relaxation.json' filepath = self.basedir + os.sep + entry_id + os.sep + filename if not os.path.exists(filepath): wf = open(filepath, 'w') data = [self.vasp_jobs[entry_id].to_dict] json.dump(data, wf, sort_keys=True, indent=4, separators=(',', ': ')) wf.close() else: rf = open(filepath, 'r') data = json.load(rf) rf.close() data.append(self.vasp_jobs[entry_id].to_dict) wf = open(filepath, 'w') json.dump(data, wf, sort_keys=True, indent=4, separators=(',', ': ')) wf.close() @property def workdirs(self): return [self.basedir + os.sep + name for name in self.population.members] @property def active_workdirs(self): return [self.basedir + os.sep + name for name in self.population.actives] def 
run(self, runner):

        def worker(workdir):
            # A LOCK file marks a directory whose job is still in flight;
            # it is created before the run and removed once the run returns.
            wf = open(workdir + os.sep + 'LOCK', 'w')
            wf.write('')
            wf.close()
            runner.run()
            os.remove(workdir + os.sep + 'LOCK')

        def checker(workdir):
            # A directory is only re-examined once its LOCK file is gone.
            if os.path.isfile(workdir + os.sep + 'LOCK'):
                return False
            return self.update(workdir)

        workdirs = [self.basedir + os.sep + i for i in self.population.actives]
        runner.run_multidirs(workdirs, worker, checker)

        # Note: is_running is expected to be maintained by the surrounding
        # driver; it is not defined anywhere in this module.
        if not self.is_running:
            self.run(runner)

    def set_run(self, code, runner, basedir, kp_density=10000, encut=1.1):
        self.runner = runner
        self.create_dirs(clean=True)
        self.create_inputs(kp_density=kp_density, encut=encut)
MaterialsDiscovery/PyChemia
pychemia/evaluator/vasp_evaluator.py
Python
mit
10,280
[ "CRYSTAL", "VASP" ]
7d91d18cfebf58c677224bbb9cc0e26f7f4f1fdbb602d237bd5ae2313f7fee8b
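The update() method in the record above steers each VASP relaxation by adjusting ISIF, EDIFF and EDIFFG between runs. Here is the EDIFF/EDIFFG schedule restated as a small pure function (a sketch for illustration: the 1E-4 floor and the -target_force endpoint mirror the code above, and the round_small rounding step is omitted).

def next_convergence_params(ediff, ediffg, target_force):
    # Halve EDIFF each pass until it reaches the 1E-4 floor used above.
    if ediff > 2 * 1E-4:
        ediff = ediff / 2.0
    else:
        ediff = 1E-4
    # EDIFFG is negative (a force criterion): halve its magnitude until it
    # lands exactly on -target_force.
    if ediffg < -2 * target_force:
        ediffg = ediffg / 2.0
    else:
        ediffg = -target_force
    return ediff, ediffg


assert next_convergence_params(8E-4, -0.08, 0.01) == (4E-4, -0.04)
assert next_convergence_params(1.5E-4, -0.015, 0.01) == (1E-4, -0.01)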
# -*- coding: utf-8 -*- #------------------------------------------------------------------------------ # Name: corpus/__init__.py # Purpose: Shortcuts to the corpus collection # # Authors: Christopher Ariza # Michael Scott Cuthbert # # Copyright: Copyright © 2009, 2015 Michael Scott Cuthbert and the music21 Project # License: LGPL or BSD, see license.txt #------------------------------------------------------------------------------ ''' The music21 corpus includes a collection of freely distributable music in MusicXML, Humdrum, and other representations. The corpus package is an interface for easily working with this data. To see a complete listing of the works in the music21 corpus, visit :ref:`referenceCorpus`. Note that music21 does not own most of the music in the corpus -- it has been licensed to us (or in a free license). It may not be free in all parts of the world, but to the best of our knowledge is true for the US. ''' from __future__ import unicode_literals import re import os import unittest import zipfile from music21 import common from music21 import converter from music21 import exceptions21 from music21 import metadata from music21.corpus import chorales from music21.corpus import virtual from music21.corpus import corpora from music21.corpus.corpora import * from music21 import environment _MOD = "corpus.base.py" environLocal = environment.Environment(_MOD) #------------------------------------------------------------------------------ class CorpusException(exceptions21.Music21Exception): pass #------------------------------------------------------------------------------ def getCorePaths(fileExtensions=None, expandExtensions=True): ''' Get all paths in the corpus that match a known extension, or an extenion provided by an argument. If `expandExtensions` is True, a format for an extension, and related extensions, will replaced by all known input extensions. This is convenient when an input format might match for multiple extensions. >>> corpusFilePaths = corpus.getCorePaths() >>> cpl = len(corpusFilePaths) >>> 2550 < cpl < 2600 True >>> kernFilePaths = corpus.getCorePaths('krn') >>> len(kernFilePaths) >= 500 True >>> abcFilePaths = corpus.getCorePaths('abc') >>> len(abcFilePaths) >= 100 True ''' return corpora.CoreCorpus().getPaths( fileExtensions=fileExtensions, expandExtensions=expandExtensions, ) def getVirtualPaths(fileExtensions=None, expandExtensions=True): ''' Get all paths in the virtual corpus that match a known extension. An extension of None will return all known extensions. >>> len(corpus.getVirtualPaths()) > 6 True ''' return corpora.VirtualCorpus().getPaths( fileExtensions=fileExtensions, expandExtensions=expandExtensions, ) def getLocalPaths(fileExtensions=None, expandExtensions=True): ''' Access files in additional directories supplied by the user and defined in environment settings in the 'localCorpusSettings' list. If additional paths are added on a per-session basis with the :func:`~music21.corpus.addPath` function, these paths are also returned with this method. ''' return corpora.LocalCorpus().getPaths( fileExtensions=fileExtensions, expandExtensions=expandExtensions, ) def addPath(filePath): ''' Add a directory path to the Local Corpus on a *temporary* basis, i.e., just for the current Python session. All directories contained within the provided directory will be searched for files with file extensions matching the currently readable file types. Any number of file paths can be added one at a time. 
An error will be raised if the file path does not exist, is already defined as a temporary, or is already being searched by being defined with the :class:`~music21.environment.Environment` 'localCorpusSettings' setting. To permanently add a path to the list of stored local corpus paths, set the 'localCorpusPath' or 'localCorpusSettings' setting of the :class:`~music21.environment.UserSettings` object. >>> #_DOCS_SHOW corpus.addPath('~/Documents') Alternatively, add a directory permanently (see link above for more details): >>> #_DOCS_SHOW us = environment.UserSettings() >>> #_DOCS_SHOW us['localCorpusPath'] = 'd:/desktop/' Restart music21 after adding paths. ''' corpora.LocalCorpus().addPath(filePath) def getPaths( fileExtensions=None, expandExtensions=True, name=('local', 'core', 'virtual'), ): ''' Get paths from core, virtual, and/or local corpora. This is the public interface for getting all corpus paths with one function. ''' paths = [] if 'core' in name: paths += corpora.CoreCorpus().getPaths( fileExtensions=fileExtensions, expandExtensions=expandExtensions, ) if 'local' in name: paths += corpora.LocalCorpus().getPaths( fileExtensions=fileExtensions, expandExtensions=expandExtensions, ) if 'virtual' in name: paths += corpora.VirtualCorpus().getPaths( fileExtensions=fileExtensions, expandExtensions=expandExtensions, ) return paths #------------------------------------------------------------------------------ # metadata routines def _updateMetadataBundle(): ''' Load the metadata bundle from JSON and store it in the module global variable _METADATA_BUNDLES, unless the _METADATA_BUNDLES have already been built, in which case, don't do it. This relies on the functions `getCorePaths()`, `getVirtualPaths()`, and `getLocalPaths()`. Note that this updates the in-memory cached metdata bundles not the disk caches (that's MUCH slower!) to do that run corpus.metadata.metadata.py ''' corpora.Corpus._updateAllMetadataBundles() def cacheMetadata(corpusNames=('local',), verbose=True): ''' Rebuild the metadata cache. ''' if not common.isIterable(corpusNames): corpusNames = [corpusNames] for name in corpusNames: corpora.Corpus._metadataBundles[name] = None metadata.cacheMetadata(corpusNames, verbose=verbose) def search( query, field=None, corpusNames=('core', 'virtual', 'local'), fileExtensions=None, ): r''' Search all stored metadata and return a list of file paths; to return a list of parsed Streams, use `searchParse()`. The `name` parameter can be used to specify one of three corpora: core (included with music21), virtual (defined in music21 but hosted online), and local (hosted on the user's system (not yet implemented)). This method uses stored metadata and thus, on first usage, will incur a performance penalty during metadata loading. >>> corpus.search('china') <music21.metadata.bundles.MetadataBundle {1235 entries}> >>> corpus.search('bach', field='composer') <music21.metadata.bundles.MetadataBundle {21 entries}> >>> corpus.search('coltrane', corpusNames=('virtual',)) <music21.metadata.bundles.MetadataBundle {1 entry}> ''' return corpora.Corpus.search( query, field=field, corpusNames=corpusNames, fileExtensions=fileExtensions, ) #------------------------------------------------------------------------------ def getComposer(composerName, fileExtensions=None): ''' Return all filenames in the corpus that match a composer's or a collection's name. An `fileExtensions`, if provided, defines which extensions are returned. An `fileExtensions` of None (default) returns all extensions. 
Note that xml and mxl are treated equivalently. >>> a = corpus.getComposer('schoenberg') >>> len(a) > 1 True >>> a = corpus.getComposer('bach', 'krn') >>> len(a) < 10 True >>> a = corpus.getComposer('bach', 'xml') >>> len(a) > 10 True ''' return corpora.CoreCorpus().getComposer( composerName, fileExtensions=fileExtensions, ) def getComposerDir(composerName): ''' Given the name of a composer, get the path to the top-level directory of that composer: >>> import os >>> a = corpus.getComposerDir('bach') >>> a.endswith(os.path.join('corpus', os.sep, 'bach')) True ''' return corpora.CoreCorpus().getComposerDirectoryPath(composerName) @property def noCorpus(): ''' Return True or False if this is a `corpus` or `noCoprus` distribution. >>> corpus.noCorpus False ''' return corpora.CoreCorpus.noCorpus #------------------------------------------------------------------------------ def getWorkList(workName, movementNumber=None, fileExtensions=None): ''' Search the corpus and return a list of filenames of works, always in a list. If no matches are found, an empty list is returned. >>> len(corpus.getWorkList('schumann_clara', 3, '.xml')) 1 Make sure that 'verdi' just gets the single Verdi piece and not the Monteverdi pieces: >>> len(corpus.getWorkList('verdi')) 1 ''' return corpora.CoreCorpus().getWorkList( workName, movementNumber=movementNumber, fileExtensions=fileExtensions, ) def getVirtualWorkList(workName, movementNumber=None, fileExtensions=None): ''' Given a work name, search all virtual works and return a list of URLs for any matches. >>> corpus.getVirtualWorkList('bach/bwv1007/prelude') ['http://kern.ccarh.org/cgi-bin/ksdata?l=cc/bach/cello&file=bwv1007-01.krn&f=xml'] >>> corpus.getVirtualWorkList('junk') [] ''' return corpora.VirtualCorpus().getWorkList( workName, movementNumber=movementNumber, fileExtensions=fileExtensions, ) #------------------------------------------------------------------------------ def getWorkReferences(): ''' Return a data dictionary for all works in the corpus Returns a list of corpus.corpora.DirectoryInformation object, one for each directory. A 'works' dictionary for each composer provides references to dictionaries for all associated works. This is used in the generation of corpus documentation >>> workRefs = corpus.getWorkReferences() >>> workRefs[1:3] [<music21.corpus.corpora.DirectoryInformation bach>, <music21.corpus.corpora.DirectoryInformation beethoven>] No longer finds the VirtualCorpus. TODO: Reinstate when that corpus becomes useful again... 
''' results = [di for di in corpora.CoreCorpus().directoryInformation] # for vw in corpora.VirtualCorpus._virtual_works: # composerDir = vw.corpusPath.split('/')[0] # match = False # for ref in results: # # check composer reference or first part of corpusPath # if (ref['composer'] == vw.composer or # composerDir == ref['composerDir']): # match = True # break # use this ref # if not match: # new composers, create a new ref # ref = {} # ref['composer'] = vw.composer # ref['composerDir'] = composerDir # ref['works'] = {} # store by keys of name/dirname # # work stub should be everything other than top-level # workStub = vw.corpusPath.replace(composerDir + '/', '') # ref['works'][workStub] = {} # ref['works'][workStub]['virtual'] = True # ref['works'][workStub]['files'] = [] # ref['works'][workStub]['title'] = vw.title # for url in vw.urlList: # m21Format, ext = common.findFormatExtURL(url) # fileDict = {} # fileDict['format'] = m21Format # fileDict['ext'] = ext # # all path parts after corpus # fileDict['corpusPath'] = vw.corpusPath # fileDict['title'] = vw.title # fileDict['url'] = url # ref['works'][workStub]['files'].append(fileDict) # if not match: # not found already, need to add # results.append(ref) return results #------------------------------------------------------------------------------ def getWork(workName, movementNumber=None, fileExtensions=None): ''' Search the corpus, then the virtual corpus, for a work, and return a file path or URL. N.B. does not parse the work: but it's suitable for passing to converter.parse. This method will return either a list of file paths or, if there is a single match, a single file path. If no matches are found an Exception is raised. >>> import os >>> a = corpus.getWork('luca/gloria') >>> a.endswith(os.path.sep.join([ ... 'luca', 'gloria.xml'])) True >>> trecentoFiles = corpus.getWork('trecento') >>> len(trecentoFiles) > 100 and len(trecentoFiles) < 200 True ''' if not common.isListLike(fileExtensions): fileExtensions = [fileExtensions] results = getWorkList(workName, movementNumber, fileExtensions) if len(results) == 0: if common.isListLike(workName): workName = os.path.sep.join(workName) if workName.endswith(".xml"): # might be compressed MXL file newWorkName = workName[0:len(workName) - 4] + ".mxl" return getWork(newWorkName, movementNumber, fileExtensions) results = getVirtualWorkList(workName, movementNumber, fileExtensions) if len(results) == 1: return results[0] elif len(results) == 0: raise CorpusException( 'Could not find a file/url that met these criteria') return results def parse( workName, movementNumber=None, number=None, fileExtensions=None, forceSource=False, format=None # @ReservedAssignment ): ''' The most important method call for corpus. Similar to the :meth:`~music21.converter.parse` method of converter (which takes in a filepath on the local hard drive), this method searches the corpus (including the virtual corpus) for a work fitting the workName description and returns a :class:`music21.stream.Stream`. If `movementNumber` is defined, and a movement is included in the corpus, that movement will be returned. If `number` is defined, and the work is a collection with multiple components, that work number will be returned. For instance, some of our ABC documents contain dozens of folk songs within a single file. Advanced: if `forceSource` is True, the original file will always be loaded freshly and pickled (e.g., pre-parsed) files will be ignored. 
This should not be needed if the file has been changed, since the filetime of the file and the filetime of the pickled version are compared. But it might be needed if the music21 parsing routine has changed. Example, get a chorale by Bach. Note that the source type does not need to be specified, nor does the name Bach even (since it's the only piece with the title BWV 66.6) >>> bachChorale = corpus.parse('bwv66.6') >>> len(bachChorale.parts) 4 After parsing, the file path within the corpus is stored as `.corpusFilePath` >>> bachChorale.corpusFilepath 'bach/bwv66.6.mxl' ''' return corpora.Corpus.parse( workName, movementNumber=movementNumber, number=number, fileExtensions=fileExtensions, forceSource=forceSource, format=format ) def _addCorpusFilepath(streamObj, filePath): # metadata attribute added to store the file path, # for use later in identifying the score #if streamObj.metadata == None: # streamObj.insert(metadata.Metadata()) corpusFilePath = common.getCorpusFilePath() lenCFP = len(corpusFilePath) + len(os.sep) if filePath.startswith(corpusFilePath): fp2 = filePath[lenCFP:] ### corpus fix for windows dirsEtc = fp2.split(os.sep) fp3 = '/'.join(dirsEtc) streamObj.corpusFilepath = fp3 else: streamObj.corpusFilepath = filePath @common.deprecated("1999?","by early 2016", "Use corpus.parse() instead.") def parseWork(*arguments, **keywords): ''' This function exists for backwards compatibility. All calls should use :func:`~music21.corpus.parse` instead. ''' return parse(*arguments, **keywords) #------------------------------------------------------------------------------ # compression def compressAllXMLFiles(deleteOriginal=False): ''' Takes all filenames in corpus.paths and runs :meth:`music21.corpus.compressXML` on each. If the musicXML files are compressed, the originals are deleted from the system. ''' environLocal.warn("Compressing musicXML files...") for filename in getPaths(fileExtensions=('.xml',)): compressXML(filename, deleteOriginal=deleteOriginal) environLocal.warn( 'Compression complete. ' 'Run the main test suite, fix bugs if necessary,' 'and then commit modified directories in corpus.' ) def compressXML(filename, deleteOriginal=False): ''' Takes a filename, and if the filename corresponds to a musicXML file with an .xml extension, creates a corresponding compressed .mxl file in the same directory. If deleteOriginal is set to True, the original musicXML file is deleted from the system. ''' if not filename.endswith('.xml'): return # not a musicXML file environLocal.warn("Updating file: {0}".format(filename)) filenameList = filename.split(os.path.sep) # find the archive name (name w/out filepath) archivedName = filenameList.pop() # new archive name filenameList.append(archivedName[0:len(archivedName) - 4] + ".mxl") newFilename = os.path.sep.join(filenameList) # new filename # contents of container.xml file in META-INF folder container = '<?xml version="1.0" encoding="UTF-8"?>\n\ <container>\n\ <rootfiles>\n\ <rootfile full-path="{0}"/>\n\ </rootfiles>\n\ </container>\n\ '.format(archivedName) # Export container and original xml file to system as a compressed XML. 
    with zipfile.ZipFile(
            newFilename,
            'w',
            compression=zipfile.ZIP_DEFLATED,
            ) as myZip:
        # zipfile.ZipFile.write() takes 'arcname', and writestr() takes the
        # member name first; archive member names always use forward slashes.
        myZip.write(filename, arcname=archivedName)
        myZip.writestr('META-INF/container.xml', container)
    # Delete uncompressed xml file from system
    if deleteOriginal:
        os.remove(filename)


def uncompressMXL(filename, deleteOriginal=False):
    '''
    Takes a filename, and if the filename corresponds to a compressed musicXML
    file with an .mxl extension, creates a corresponding uncompressed .xml file
    in the same directory.

    If deleteOriginal is set to True, the original compressed musicXML file is
    deleted from the system.
    '''
    if not filename.endswith(".mxl"):
        return  # not a compressed musicXML file
    environLocal.warn("Updating file: {0}".format(filename))
    filenames = filename.split(os.path.sep)
    # find the archive name (name w/out filepath)
    archivedName = filenames.pop()
    unarchivedName = os.path.splitext(archivedName)[0] + '.xml'
    extractPath = os.path.sep.join(filenames)
    # Extract the uncompressed xml file from the archive.
    with zipfile.ZipFile(filename, 'r') as myZip:
        myZip.extract(member=unarchivedName, path=extractPath)
    # Delete the compressed mxl file from the system
    if deleteOriginal:
        os.remove(filename)

#------------------------------------------------------------------------------
# libraries
# additional libraries to define


def getBachChorales(fileExtensions='xml'):
    r'''
    Return the file name of all Bach chorales.

    By default, only Bach Chorales in xml format are returned, because the
    quality of the encoding and our parsing of those is superior.

    N.B. Look at the module corpus.chorales for many better ways to work with
    the chorales.

    >>> a = corpus.getBachChorales()
    >>> len(a) > 400
    True

    >>> a = corpus.getBachChorales('krn')
    >>> len(a) > 10
    False

    >>> a = corpus.getBachChorales('xml')
    >>> len(a) > 400
    True

    >>> #_DOCS_SHOW a[0]
    >>> '/Users/cuthbert/Documents/music21/corpus/bach/bwv1.6.mxl' #_DOCS_HIDE
    '/Users/cuthbert/Documents/music21/corpus/bach/bwv1.6.mxl'
    '''
    cc = corpora.CoreCorpus()
    return cc.getBachChorales(fileExtensions=fileExtensions,)


def getMonteverdiMadrigals(fileExtensions='xml'):
    '''
    Return a list of the filenames of all Monteverdi madrigals.

    >>> a = corpus.getMonteverdiMadrigals()
    >>> len(a) > 40
    True
    '''
    return corpora.CoreCorpus().getMonteverdiMadrigals(
        fileExtensions=fileExtensions,
        )

#------------------------------------------------------------------------------

if __name__ == "__main__":
    import music21
    music21.mainTest()
arnavd96/Cinemiezer
myvenv/lib/python3.4/site-packages/music21/corpus/__init__.py
Python
mit
21,355
[ "VisIt" ]
5f26c3492a52aa1a5c23ead57bda5402f183296a289552f846d668d58df9c1c5
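compressXML in the record above packs a MusicXML file plus a META-INF/container.xml manifest into a zip archive renamed .mxl. The same layout in a self-contained snippet; the paths and the 'score.xml' member name are placeholders, and zip member names always use forward slashes.

import zipfile


def make_mxl(xml_path, mxl_path, arcname='score.xml'):
    # The container manifest tells readers where the root score lives.
    container = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                 '<container>\n'
                 '  <rootfiles>\n'
                 '    <rootfile full-path="{0}"/>\n'
                 '  </rootfiles>\n'
                 '</container>\n').format(arcname)
    with zipfile.ZipFile(mxl_path, 'w', compression=zipfile.ZIP_DEFLATED) as z:
        z.write(xml_path, arcname=arcname)               # the score itself
        z.writestr('META-INF/container.xml', container)  # the manifest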
''' Individual stages of the pipeline implemented as functions from input files to output files. The run_stage function knows everything about submitting jobs and, given the state parameter, has full access to the state of the pipeline, such as config, options, DRMAA and the logger. ''' from utils import safe_make_dir from runner import run_stage import os # PICARD_JAR = '$PICARD_HOME/lib/picard-1.69.jar' # PICARD_JAR = '/vlsci/VR0002/kmahmood/Programs/Picard/picard-tools-2.8.3/picard.jar' PICARD_JAR = '/usr/local/easybuild/software/picard/2.3.0/picard.jar' SNPEFF_JAR = '/usr/local/easybuild/software/snpEff/4.1d-Java-1.7.0_80/snpEff.jar' GATK_JAR = '$GATK_HOME/GenomeAnalysisTK.jar' def java_command(jar_path, mem_in_gb, command_args): '''Build a string for running a java command''' # Bit of room between Java's max heap memory and what was requested. # Allows for other Java memory usage, such as stack. java_mem = mem_in_gb - 2 return 'java -Xmx{mem}g -jar {jar_path} {command_args}'.format( jar_path=jar_path, mem=java_mem, command_args=command_args) def run_java(state, stage, jar_path, mem, args): command = java_command(jar_path, mem, args) run_stage(state, stage, command) class Stages(object): def __init__(self, state): self.state = state self.reference = self.get_options('ref_grch37') self.dbsnp_hg19 = self.get_options('dbsnp_hg19') self.mills_hg19 = self.get_options('mills_hg19') self.one_k_g_snps = self.get_options('one_k_g_snps') self.one_k_g_indels = self.get_options('one_k_g_indels') self.one_k_g_highconf_snps = self.get_options('one_k_g_highconf_snps') self.hapmap = self.get_options('hapmap') # self.interval_hg19 = self.get_options('exome_bed_hg19') # self.CEU_mergeGvcf = self.get_options('CEU_mergeGvcf') self.snpeff_conf = self.get_options('snpeff_conf') self.bamclipper = self.get_options('bamclipper') self.vep_path = self.get_options('vep_path') self.vt_path = self.get_options('vt_path') self.coord_file = self.get_options('coord_file') self.target_bed = self.get_options('target_bed') # self.interval_file = self.get_options('interval_file') self.primer_file = self.get_options('primer_file') self.primer_bedpe_file = self.get_options('primer_bedpe_file') self.proportionthresh = self.get_options('proportionthresh') self.absthresh = self.get_options('absthresh') self.maxvariants = self.get_options('maxvariants') # self.fragment_bed = self.get_options('fragment_bed') self.annolua = self.get_options('annolua') self.anno = self.get_options('anno') self.hrfile = self.get_options('hrfile') self.other_vep = self.get_options('other_vep') self.snpeff_path = self.get_options('snpeff_path') self.gatk_bed = self.get_options('gatk_bed') # self.GBR_mergeGvcf = self.get_options('GBR_mergeGvcf') # self.FIN_mergeGvcf = self.get_options('FIN_mergeGvcf') def run_picard(self, stage, args): mem = int(self.state.config.get_stage_options(stage, 'mem')) return run_java(self.state, stage, PICARD_JAR, mem, args) def run_snpeff(self, stage, args): mem = int(self.state.config.get_stage_options(stage, 'mem')) return run_java(self.state, stage, SNPEFF_JAR, mem, args) def run_gatk(self, stage, args): mem = int(self.state.config.get_stage_options(stage, 'mem')) return run_java(self.state, stage, GATK_JAR, mem, args) def get_stage_options(self, stage, *options): return self.state.config.get_stage_options(stage, *options) def get_options(self, *options): return self.state.config.get_options(*options) def original_fastqs(self, output): '''Original fastq files''' # print output pass def align_bwa(self, inputs, bam_out, 
sample_id, read_id, lane, lib): # def align_bwa(self, inputs, bam_out, sample_id): '''Align the paired end fastq files to the reference genome using bwa''' fastq_read1_in, fastq_read2_in = inputs cores = self.get_stage_options('align_bwa', 'cores') safe_make_dir('alignments/{sample}_{readid}'.format(sample=sample_id, readid=read_id)) read_group = '"@RG\\tID:{readid}\\tSM:{sample}_{readid}\\tPU:lib1\\tLN:{lane}\\tPL:Illumina"' \ .format(readid=read_id, lib=lib, lane=lane, sample=sample_id) command = 'bwa mem -M -t {cores} -R {read_group} {reference} {fastq_read1} {fastq_read2} ' \ '| samtools view -b -h -o {bam} -' \ .format(cores=cores, read_group=read_group, fastq_read1=fastq_read1_in, fastq_read2=fastq_read2_in, reference=self.reference, bam=bam_out) run_stage(self.state, 'align_bwa', command) def apply_undr_rover(self, inputs, vcf_output, sample_id, readid): # def align_bwa(self, inputs, bam_out, sample_id): '''Apply undr_rover to call variants from paired end fastq files''' fastq_read1_in, fastq_read2_in = inputs cores = self.get_stage_options('apply_undr_rover', 'cores') safe_make_dir('variants/undr_rover') safe_make_dir('variants/undr_rover/coverdir') coverfile = "variants/undr_rover/coverdir/" + sample_id + "_" + readid + ".coverage" # read_group = '"@RG\\tID:{readid}\\tSM:{sample}_{readid}\\tPU:lib1\\tLN:{lane}\\tPL:Illumina"' \ # .format(readid=read_id, lib=lib, lane=lane, sample=sample_id) command = 'undr_rover --primer_coords {coord_file} ' \ '--primer_sequences {primer_file} ' \ '--reference {reference} ' \ '--out {vcf_output} ' \ '--coverfile {coverfile} ' \ '--proportionthresh {proportionthresh} ' \ '--absthresh {absthresh} ' \ '--max_variants {maxvariants} ' \ '{fastq_read1} {fastq_read2}'.format( coord_file=self.coord_file, primer_file=self.primer_file, reference=self.reference, vcf_output=vcf_output, #coverdir=self.coverdir, proportionthresh=self.proportionthresh, absthresh=self.absthresh, maxvariants=self.maxvariants, coverfile=coverfile, fastq_read1=fastq_read1_in, fastq_read2=fastq_read2_in) run_stage(self.state, 'apply_undr_rover', command) def clip_bam(self, bam_in, sorted_bam_out): '''Clip the BAM file using Bamclipper''' bamclipper_args = '{bamclipper} -b {bam_in} -p {primer_bedpe_file} -n 1'.format( bamclipper=self.bamclipper, bam_in=bam_in, primer_bedpe_file=self.primer_bedpe_file) run_stage(self.state, 'clip_bam', bamclipper_args) def sort_bam_picard(self, bam_in, sorted_bam_out): '''Sort the BAM file using Picard''' picard_args = 'SortSam INPUT={bam_in} OUTPUT={sorted_bam_out} ' \ 'VALIDATION_STRINGENCY=LENIENT SORT_ORDER=coordinate ' \ 'MAX_RECORDS_IN_RAM=5000000 CREATE_INDEX=True'.format( bam_in=bam_in, sorted_bam_out=sorted_bam_out) self.run_picard('sort_bam_picard', picard_args) def primary_bam(self, bam_in, sbam_out): '''On keep primary alignments in the BAM file using samtools''' command = 'samtools view -h -q 1 -f 2 -F 4 -F 8 -F 256 -b ' \ '-o {sbam_out} {bam_in}'.format( bam_in=bam_in, sbam_out=sbam_out) run_stage(self.state, 'primary_bam', command) # index sorted bam file def index_sort_bam_picard(self, bam_in, bam_index): '''Index sorted bam using samtools''' command = 'samtools index {bam_in} {bam_index}'.format( bam_in=bam_in, bam_index=bam_index) run_stage(self.state, 'index_sort_bam_picard', command) ########## def call_haplotypecaller_gatk(self, bam_in, vcf_out): '''Call variants using GATK''' safe_make_dir('variants/gatk') # safe_make_dir('variants}'.format(sample=sample_id)) gatk_args = "-T HaplotypeCaller -R {reference} --min_base_quality_score 
20 " \ "--emitRefConfidence GVCF " \ "-A AlleleBalance -A AlleleBalanceBySample " \ "-A ChromosomeCounts -A ClippingRankSumTest " \ "-A Coverage -A DepthPerAlleleBySample " \ "-A DepthPerSampleHC -A FisherStrand " \ "-A GCContent -A GenotypeSummaries " \ "-A HardyWeinberg -A HomopolymerRun " \ "-A LikelihoodRankSumTest -A LowMQ " \ "-A MappingQualityRankSumTest -A MappingQualityZero " \ "-A QualByDepth " \ "-A RMSMappingQuality -A ReadPosRankSumTest " \ "-A SampleList -A SpanningDeletions " \ "-A StrandBiasBySample -A StrandOddsRatio " \ "-A TandemRepeatAnnotator -A VariantType " \ "--dontUseSoftClippedBases " \ "-I {bam} -o {out}".format(reference=self.reference, bam=bam_in, out=vcf_out) self.run_gatk('call_haplotypecaller_gatk', gatk_args) def combine_gvcf_gatk(self, vcf_files_in, vcf_out): '''Combine G.VCF files for all samples using GATK''' g_vcf_files = ' '.join(['--variant ' + vcf for vcf in vcf_files_in]) gatk_args = "-T CombineGVCFs -R {reference} -L {gatk_bed} " \ "--disable_auto_index_creation_and_locking_when_reading_rods " \ "{g_vcf_files} -o {vcf_out}".format(reference=self.reference, gatk_bed=self.gatk_bed, g_vcf_files=g_vcf_files, vcf_out=vcf_out) self.run_gatk('combine_gvcf_gatk', gatk_args) def genotype_gvcf_gatk(self, combined_vcf_in, vcf_out): '''Genotype G.VCF files using GATK''' cores = self.get_stage_options('genotype_gvcf_gatk', 'cores') gatk_args = "-T GenotypeGVCFs -R {reference} " \ "--disable_auto_index_creation_and_locking_when_reading_rods " \ "--dbsnp {dbsnp} -L {gatk_bed} " \ "--num_threads {cores} --variant {combined_vcf} --out {vcf_out}" \ .format(reference=self.reference, dbsnp=self.dbsnp_hg19, gatk_bed=self.gatk_bed, cores=cores, combined_vcf=combined_vcf_in, vcf_out=vcf_out) self.run_gatk('genotype_gvcf_gatk', gatk_args)
khalidm/hiplexpipe
src/stages.py
Python
mit
11,036
[ "BWA" ]
0f09132d57be82b8522851b50f44b4b116788ac6fc8d4f05ffa74bf3950abfaf
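Every stage in the record above assembles a shell command with str.format and hands it to run_stage; Java tools get their heap size from java_command, which reserves 2 GB of the job's allocation for non-heap JVM memory. A standalone check of the string that recipe produces (the jar path and arguments are placeholders):

def java_command(jar_path, mem_in_gb, command_args):
    # Leave headroom between the requested memory and Java's max heap,
    # covering stack, metaspace and other non-heap JVM usage.
    java_mem = mem_in_gb - 2
    return 'java -Xmx{mem}g -jar {jar_path} {command_args}'.format(
        jar_path=jar_path, mem=java_mem, command_args=command_args)


cmd = java_command('/opt/picard.jar', 32, 'SortSam INPUT=in.bam OUTPUT=out.bam')
assert cmd == 'java -Xmx30g -jar /opt/picard.jar SortSam INPUT=in.bam OUTPUT=out.bam'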
r"""Module for generating a Python state for ParaView. This module uses paraview.smtrace to generate a trace for a selected set of proxies my mimicking the creating of various pipeline components in sequence. Typical usage of this module is as follows:: from paraview import smstate state = smstate.get_state() print state Note, this cannot be called when Python tracing is active. """ from paraview import servermanager as sm from paraview import smtrace from paraview import simple class supported_proxies(object): """filter object used to hide proxies that are currently not supported by the state saving mechanism or those that are generally skipped in state e.g. animation proxies and time keeper.""" def __call__(self, proxy): return proxy and \ not proxy.GetXMLGroup() == "animation" and \ not proxy.GetXMLName() == "TimeKeeper" class visible_representations(object): """filter object to skip hidden representations from being saved in state file""" def __call__(self, proxy): if not supported_proxies()(proxy): return False try: return proxy.Visibility except AttributeError: pass return True def __toposort(input_set): """implementation of Tarjan topological sort to sort proxies using consumer dependencies as graph edges.""" result = [] marked_set = set() while marked_set != input_set: unmarked_node = (input_set - marked_set).pop() __toposort_visit(result, unmarked_node, input_set, marked_set) result.reverse() return result def __toposort_visit(result, proxy, input_set, marked_set, t_marked_set=None): if t_marked_set is None: temporarily_marked_set = set() else: temporarily_marked_set = t_marked_set if proxy in temporarily_marked_set: raise RuntimeError, "Cycle detected in pipeline! %r" % proxy if not proxy in marked_set: temporarily_marked_set.add(proxy) consumers = set() get_consumers(proxy, lambda x: x in input_set, consumer_set=consumers, recursive=False) for x in consumers: __toposort_visit(result, x, input_set, marked_set, temporarily_marked_set) marked_set.add(proxy) temporarily_marked_set.discard(proxy) result.append(proxy) def get_consumers(proxy, filter, consumer_set, recursive=True): """Returns the consumers for a proxy iteratively. If filter is non-None, filter is used to cull consumers.""" for i in xrange(proxy.GetNumberOfConsumers()): consumer = proxy.GetConsumerProxy(i) consumer = consumer.GetTrueParentProxy() if consumer else None consumer = sm._getPyProxy(consumer) if not consumer or consumer.IsPrototype() or consumer in consumer_set: continue if filter(consumer): consumer_set.add(consumer) if recursive: get_consumers(consumer, filter, consumer_set) def get_producers(proxy, filter, producer_set): """Returns the producers for a proxy iteratively. If filter is non-None, filter is used to cull producers.""" for i in xrange(proxy.GetNumberOfProducers()): producer = proxy.GetProducerProxy(i) producer = producer.GetTrueParentProxy() if producer else None producer = sm._getPyProxy(producer) if not producer or producer.IsPrototype() or producer in producer_set: continue if filter(producer): producer_set.add(producer) get_producers(producer, filter, producer_set) # FIXME: LookupTable is missed :/, darn subproxies! 
try: if proxy.LookupTable and filter(proxy.LookupTable): producer_set.add(proxy.LookupTable) get_producers(proxy.LookupTable, filter, producer_set) except AttributeError: pass try: if proxy.ScalarOpacityFunction and filter(proxy.ScalarOpacityFunction): producer_set.add(proxy.ScalarOpacityFunction) get_producers(proxy.ScalarOpacityFunction, filter, producer_set) except AttributeError: pass def get_state(propertiesToTraceOnCreate=1, # sm.vtkSMTrace.RECORD_MODIFIED_PROPERTIES, skipHiddenRepresentations=True, source_set=[], filter=None, raw=False): """Returns the state string""" if sm.vtkSMTrace.GetActiveTracer(): raise RuntimeError, "Cannot generate Python state when tracing is active." if filter is None: filter = visible_representations() if skipHiddenRepresentations else supported_proxies() # build a set of proxies of interest if source_set: start_set = source_set else: # if nothing is specified, we save all views and sources. start_set = simple.GetSources().values() + simple.GetViews() start_set = [x for x in start_set if filter(x)] # now, locate dependencies for the start_set, pruning irrelevant branches consumers = set(start_set) for proxy in start_set: get_consumers(proxy, filter, consumers) producers = set() for proxy in consumers: get_producers(proxy, filter, producers) # proxies_of_interest is set of all proxies that we should trace. proxies_of_interest = producers.union(consumers) #print "proxies_of_interest", proxies_of_interest trace_config = smtrace.start_trace() # this ensures that lookup tables/scalar bars etc. are fully traced. trace_config.SetFullyTraceSupplementalProxies(True) trace = smtrace.TraceOutput() trace.append("# state file generated using %s" % simple.GetParaViewSourceVersion()) #-------------------------------------------------------------------------- # First, we trace the views and layouts, if any. # TODO: add support for layouts. views = [x for x in proxies_of_interest if smtrace.Trace.get_registered_name(x, "views")] if views: # sort views by their names, so the state has some structure to it. views = sorted(views, cmp=lambda x,y:\ cmp(smtrace.Trace.get_registered_name(x, "views"), smtrace.Trace.get_registered_name(y, "views"))) trace.append_separated([\ "# ----------------------------------------------------------------", "# setup views used in the visualization", "# ----------------------------------------------------------------"]) for view in views: # FIXME: save view camera positions and size. traceitem = smtrace.RegisterViewProxy(view) traceitem.finalize() del traceitem trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True)) #-------------------------------------------------------------------------- # Next, trace data processing pipelines. sorted_proxies_of_interest = __toposort(proxies_of_interest) sorted_sources = [x for x in sorted_proxies_of_interest \ if smtrace.Trace.get_registered_name(x, "sources")] if sorted_sources: trace.append_separated([\ "# ----------------------------------------------------------------", "# setup the data processing pipelines", "# ----------------------------------------------------------------"]) for source in sorted_sources: traceitem = smtrace.RegisterPipelineProxy(source) traceitem.finalize() del traceitem trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True)) #-------------------------------------------------------------------------- # Now, trace the transfer functions (color maps and opacity maps) used. 
ctfs = set([x for x in proxies_of_interest \ if smtrace.Trace.get_registered_name(x, "lookup_tables")]) if ctfs: trace.append_separated([\ "# ----------------------------------------------------------------", "# setup color maps and opacity mapes used in the visualization", "# note: the Get..() functions create a new object, if needed", "# ----------------------------------------------------------------"]) for ctf in ctfs: smtrace.Trace.get_accessor(ctf) if ctf.ScalarOpacityFunction in proxies_of_interest: smtrace.Trace.get_accessor(ctf.ScalarOpacityFunction) trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True)) #-------------------------------------------------------------------------- # Can't decide if the representations should be saved with the pipeline # objects or afterwords, opting for afterwords for now since the topological # sort doesn't guarantee that the representations will follow their sources # anyways. sorted_representations = [x for x in sorted_proxies_of_interest \ if smtrace.Trace.get_registered_name(x, "representations")] scalarbar_representations = [x for x in sorted_proxies_of_interest\ if smtrace.Trace.get_registered_name(x, "scalar_bars")] # print "sorted_representations", sorted_representations # print "scalarbar_representations", scalarbar_representations if sorted_representations or scalarbar_representations: for view in views: view_representations = [x for x in view.Representations if x in sorted_representations] view_scalarbars = [x for x in view.Representations if x in scalarbar_representations] if view_representations or view_scalarbars: trace.append_separated([\ "# ----------------------------------------------------------------", "# setup the visualization in view '%s'" % smtrace.Trace.get_accessor(view), "# ----------------------------------------------------------------"]) for rep in view_representations: try: producer = rep.Input port = rep.Input.Port traceitem = smtrace.Show(producer, port, view, rep, comment="show data from %s" % smtrace.Trace.get_accessor(producer)) traceitem.finalize() del traceitem trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True)) if rep.IsScalarBarVisible(view): # FIXME: this will save this multiple times, right now, # if two representations use the same LUT. trace.append_separated([\ "# show color legend", "%s.SetScalarBarVisibility(%s, True)" % (\ smtrace.Trace.get_accessor(rep), smtrace.Trace.get_accessor(view))]) except AttributeError: pass # save the scalar bar properties themselves. if view_scalarbars: trace.append_separated("# setup the color legend parameters for each legend in this view") for rep in view_scalarbars: smtrace.Trace.get_accessor(rep) trace.append_separated(smtrace.get_current_trace_output_and_reset(raw=True)) del trace_config smtrace.stop_trace() #print trace return str(trace) if not raw else trace.raw_data() if __name__ == "__main__": print "Running test" simple.Mandelbrot() simple.Show() simple.Hide() simple.Shrink().ShrinkFactor = 0.4 simple.UpdatePipeline() simple.Clip().ClipType.Normal[1] = 1 rep = simple.Show() view = simple.Render() view.ViewSize=[500, 500] rep.SetScalarBarVisibility(view, True) simple.Render() # rep.SetScalarBarVisibility(view, False) print "====================================================================" print get_state()
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/Wrapping/Python/paraview/smstate.py
Python
gpl-3.0
11,879
[ "ParaView" ]
d32720b2644b8f5625f1dce5d9e394e26599b0d89feab95cb31866e7fce45878
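The __toposort helpers in the record above perform a depth-first topological sort over proxies, using consumer relationships as edges and a temporarily-marked set for cycle detection. The same algorithm on a plain dict, free of ParaView proxies:

def toposort(graph):
    # graph maps node -> iterable of consumers (edges point downstream).
    result, marked, in_progress = [], set(), set()

    def visit(node):
        if node in in_progress:
            raise RuntimeError("Cycle detected in pipeline! %r" % node)
        if node not in marked:
            in_progress.add(node)
            for consumer in graph.get(node, ()):
                visit(consumer)
            in_progress.discard(node)
            marked.add(node)
            result.append(node)

    for node in graph:
        visit(node)
    result.reverse()  # producers now precede their consumers
    return result


assert toposort({'reader': ['clip'], 'clip': ['rep'], 'rep': []}) == \
    ['reader', 'clip', 'rep']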
import numpy as np from mayavi import mlab from scipy.interpolate import splprep, splev from traits.api import HasTraits, Instance, Button, on_trait_change, Float from traitsui.api import View, Item, HSplit, Group, VGroup from mayavi.core.ui.api import MlabSceneModel, SceneEditor ################################################################################ # Remark: I used this script to find a good frequency -> xy values mapping. # # I generated, as carefully as I could, all xy values the app would send to # # the lamp. I did this by SLOOOOOOOWLY sliding through the slider, and # # printing out all xy values I received, in order. I made 3 of those files, # # which are ofc a bit different due to the speed I used not being constant, as # # I'm not a robot. # # I then used this script to find a good B-spline interpolation. On the screen # # you can input a value for the smoothing factor s and recalculate + redraw # # the interpolated function. I found a value of 0.001 for s to be good. # # On the interpolated function, 411 equidistant samples are taken, one for # # 0.05 frequency in the range 87.5 -> 108.00. # # The xy values are then printed to the console. # # # # These values are copied in the colorspaces.py, since I didn't want to add # # the dependency to scipy there. # # # # I executed this script in Enthought Canopy Version: 1.7.4.3348 (64 bit). # # Required packages: # # - numpy 1.10.4-1 # # - mayavi 4.4.3-10 # # - vtk 6.3.0-4 # # - scipy 0.17.1-1 # # - traits 4.5.0-1 # # - traitsui 5.1.0-1 # ################################################################################ def read_file(filename): linenb = 0 data = [[], [], []] for line in open(filename, 'r'): fields = line[1:-2].split(',') data[0].append(linenb) data[1].append(float(fields[0])) data[2].append(float(fields[1])) linenb = linenb + 1 return np.array(data) class MyDialog(HasTraits): p0 = read_file('testGO0.txt') p1 = read_file('testGO1.txt') p2 = read_file('testGO2.txt') new_u = x = y = None scene1 = Instance(MlabSceneModel, ()) scene2 = Instance(MlabSceneModel, ()) button1 = Button('Redraw') button2 = Button('Redraw') buttonSave = Button('Save') s1 = Float s2 = Float @on_trait_change('button1') def redraw_scene1(self): self.redraw_scene(self.scene1, self.s1) @on_trait_change('button2') def redraw_scene2(self): self.redraw_scene(self.scene2, self.s2) @on_trait_change('buttonSave') def save(self): f = open('outputGO.txt', 'w') f.write('freq = [\n') for i in range(0, len(self.new_u)): f.write(' [%s,%s],\n' % (self.x[i], self.y[i])) f.write(']') f.close() def redraw_scene(self, scene, s): mlab.clf(figure=scene.mayavi_scene) mlab.plot3d(np.divide(self.p0[0], 100), self.p0[1], self.p0[2], tube_radius=0.005, color=(1, 0, 0), figure=scene.mayavi_scene) mlab.plot3d(np.divide(self.p1[0], 100), self.p1[1], self.p1[2], tube_radius=0.005, color=(0, 1, 0), figure=scene.mayavi_scene) mlab.plot3d(np.divide(self.p2[0], 100), self.p2[1], self.p2[2], tube_radius=0.005, color=(0, 0, 1), figure=scene.mayavi_scene) tck, u = splprep([self.p1[1], self.p1[2]], u=np.linspace(87.50, 108.00, len(self.p1[0])), s=s, k=3) self.new_u = np.linspace(87.50, 108.00, 411) self.x, self.y = splev(self.new_u, tck, ext=2) mlab.plot3d(np.divide(self.new_u, 100), self.x, self.y, tube_radius=0.005, color=(1, 1, 1), figure=scene.mayavi_scene) # The layout of the dialog created view = View(VGroup( HSplit( Group( Item('scene1', editor=SceneEditor(), height=250, width=300), 'button1', 's1', show_labels=False, ), Group( Item('scene2', editor=SceneEditor(), 
height=250, width=300, show_label=False), 'button2', 's2', show_labels=False, ) ), 'buttonSave', show_labels=False ), resizable=True, ) m = MyDialog() m.configure_traits()
drhoet/marantz-hue-adapter
analysis/color_space_analysis.py
Python
mit
5,469
[ "Mayavi", "VTK" ]
24e0ad7aa8c173ce2f893387474a6326bb6d421e0b2548dabb32de7c26495b37
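The analysis script above fits a smoothed parametric B-spline through hand-recorded (frequency, x, y) traces with scipy's splprep, then resamples it with splev; s=0.001 is the smoothing value the author settled on. The fit-and-resample core in isolation, on synthetic noisy data:

import numpy as np
from scipy.interpolate import splprep, splev

# Noisy parametric samples standing in for the recorded xy trace.
u = np.linspace(87.5, 108.0, 200)
x = np.cos(u / 10.0) + np.random.normal(0.0, 0.01, u.size)
y = np.sin(u / 10.0) + np.random.normal(0.0, 0.01, u.size)

# Fit a cubic B-spline with smoothing, then resample it on an even grid:
# 411 points covers 87.50..108.00 MHz at one sample per 0.05 MHz.
tck, _ = splprep([x, y], u=u, s=0.001, k=3)
new_u = np.linspace(87.5, 108.0, 411)
xs, ys = splev(new_u, tck)
assert xs.shape == ys.shape == (411,)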
#!/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright 2015 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ MNIST example demonstrating the use of merge layers. """ from neon.data import ArrayIterator, load_mnist from neon.initializers import Gaussian from neon.layers import GeneralizedCost, Affine, Sequential, MergeMultistream from neon.models import Model from neon.optimizers import GradientDescentMomentum from neon.transforms import Rectlin, Logistic, CrossEntropyBinary from neon.callbacks.callbacks import Callbacks from neon.util.argparser import NeonArgparser # parse the command line arguments parser = NeonArgparser(__doc__) args = parser.parse_args() # hyperparameters num_epochs = args.epochs (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir) train_set = ArrayIterator([X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28)) valid_set = ArrayIterator([X_test, X_test], y_test, nclass=nclass, lshape=(1, 28, 28)) # weight initialization init_norm = Gaussian(loc=0.0, scale=0.01) # initialize model path1 = Sequential(layers=[Affine(nout=100, init=init_norm, activation=Rectlin()), Affine(nout=100, init=init_norm, activation=Rectlin())]) path2 = Sequential(layers=[Affine(nout=100, init=init_norm, activation=Rectlin()), Affine(nout=100, init=init_norm, activation=Rectlin())]) layers = [MergeMultistream(layers=[path1, path2], merge="stack"), Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))] model = Model(layers=layers) cost = GeneralizedCost(costfunc=CrossEntropyBinary()) # fit and validate optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9) # configure callbacks callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args) model.fit(train_set, cost=cost, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)
coufon/neon-distributed
examples/mnist_merge.py
Python
apache-2.0
2,564
[ "Gaussian" ]
96a4407ebe1dc2cdcf0a09348c6fe8cd8033cff060595dc81d46bbc8a1cca654
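MergeMultistream in the record above routes each element of the iterator's input list into one branch and joins the branch outputs before the final Affine layer. The numpy picture below treats merge="stack" as feature-axis concatenation; that reading is an assumption for illustration, not a statement of neon's documented semantics.

import numpy as np

batch = 128
branch1_out = np.zeros((100, batch))  # assuming neon's (features, batch) layout
branch2_out = np.ones((100, batch))
stacked = np.vstack([branch1_out, branch2_out])
assert stacked.shape == (200, batch)  # what Affine(nout=10) would then consume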
""" Handles testing of the core object and function module # Written By: Matthew Stadelman Date Written: 2016/06/09 Last Modifed: 2017/03/03 # """ from argparse import Namespace import logging import os import pytest import re import sys import scipy as sp import PIL import apmapflow as apm import apmapflow.ap_map_flow as apm_core class TestCore: r""" Tests each of the functions an classes used in the ap_map_flow module """ def test_data_field(self): r""" Builds a data field and tests its properties """ # # covering basic methods map_file = 'parallel-plate-01vox.txt' fname = os.path.join(FIXTURE_DIR, 'maps', map_file) field = apm.DataField(fname) field.create_point_data() # # testing initization from data obj = apm.DataField(field.data_map) assert obj.nx == 100 assert obj.nz == 100 assert obj.data_map.size == 10000 # obj = Namespace() field.copy_data(obj) # assert obj.nx == 100 assert obj.nz == 100 assert obj.data_map.size == 10000 assert obj.point_data.size == 40000 # # testing adjacency matrix matrix = field.create_adjacency_matrix() assert matrix is not None # # testing thresholding field._data_map = sp.arange(field.nz*field.nx, dtype=float).reshape(field.nz, field.nx) low_inds = sp.where(field.data_vector <= 100) high_inds = sp.where(field.data_vector >= 900) field.threshold_data(min_value=100, repl=-1) field.threshold_data(max_value=900) assert sp.all(field.data_vector[low_inds] == -1) assert sp.all(sp.isnan(field.data_vector[high_inds])) # # testing VTK file generation field.infile = os.path.join(TEMP_DIR, 'test-export.csv') field.export_vtk() fname = os.path.join(TEMP_DIR, 'test-export.vtk') assert os.path.isfile(fname) # with open(fname, 'r') as file: content = file.read() assert re.search('DATASET STRUCTURED_GRID\n', content) assert re.search('DIMENSIONS 101 2 101\n', content) assert re.search('POINTS 20402 float\n', content) assert re.search('CELL_DATA 10000\n', content) assert re.search('SCALARS data float\n', content) # with pytest.raises(FileExistsError): field.export_vtk() def test_fracture_image_stack(self): r""" Loads and builds an image stack to test its properties """ # # testing initialization from data array img_data = sp.ones((10, 11, 12)) fracture_stack = apm.FractureImageStack(img_data, dtype=sp.uint8) assert issubclass(fracture_stack.__class__, sp.ndarray) assert fracture_stack.dtype == sp.uint8 assert fracture_stack.shape == img_data.shape assert fracture_stack.size == img_data.size # # testing initialization from image file fname = os.path.join(FIXTURE_DIR, 'binary-fracture.tif') fracture_stack = apm.FractureImageStack(fname) assert issubclass(fracture_stack.__class__, sp.ndarray) assert fracture_stack.dtype == bool assert fracture_stack.shape == (507, 46, 300) assert fracture_stack.nx == fracture_stack.shape[0] assert fracture_stack.ny == fracture_stack.shape[1] assert fracture_stack.nz == fracture_stack.shape[2] # # test fetching of fracture voxels voxels = fracture_stack.get_fracture_voxels() assert voxels.size == 733409 del voxels # # checking all coordinates are between 0 and maximum axis size x_c, y_c, z_c = fracture_stack.get_fracture_voxels(coordinates=True) assert x_c.size == y_c.size == z_c.size == 733409 assert sp.all(x_c < fracture_stack.nx) assert sp.all(~x_c < 0) assert sp.all(y_c < fracture_stack.ny) assert sp.all(~y_c < 0) assert sp.all(z_c < fracture_stack.nz) assert sp.all(~z_c < 0) # # testing aperture map output fname = os.path.join(FIXTURE_DIR, 'maps', 'binary-fracture-aperture-map.txt') test_map = fracture_stack.create_aperture_map() data_map 
= sp.loadtxt(fname, delimiter='\t') assert sp.all(test_map == data_map) # # testing offset map output fname = os.path.join(FIXTURE_DIR, 'maps', 'binary-fracture-offset-map.txt') test_map = fracture_stack.create_offset_map() data_map = sp.loadtxt(fname, delimiter='\t') assert sp.all(test_map == data_map) del test_map del data_map # # testing image stack saving fname = os.path.join(TEMP_DIR, 'test.tif') fracture_stack.save(fname) new_stack = apm.FractureImageStack(fname) assert sp.all(fracture_stack == new_stack) del new_stack # testing overwrite parameter with pytest.raises(FileExistsError): fracture_stack.save(fname) # fracture_stack.save(fname, overwrite=True) def test_toplevel_logger(self): r""" Tests the configuation of the top level logger """ logger = apm_core._get_logger('apmapflow') assert logger.name == 'APM' assert len(logger.handlers) == 0 def test_get_logger(self): r""" Tests creation of a logger """ logger = apm_core._get_logger('apmapflow.Test.TestCore') # assert logger.name == 'APM.Test.TestCore' def test_set_main_logger_level(self): r""" Tests adjudtment of primary logger level """ # logger = logging.getLogger('APM') # apm_core.set_main_logger_level('debug') assert logger.getEffectiveLevel() == logging.DEBUG # apm_core.set_main_logger_level(logging.INFO) assert logger.getEffectiveLevel() == logging.INFO def test_files_from_directory(self): r""" Runs the files_from_directory command with various args """ files = apm.files_from_directory('.', '*') assert len(files) files = apm.files_from_directory('.', re.compile('.')) assert len(files) def test_load_infile_list(self): r""" Sends a list of infiles """ fname1 = os.path.join(FIXTURE_DIR, 'maps', 'parallel-plate-01vox.txt') fname2 = os.path.join(FIXTURE_DIR, 'maps', 'parallel-plate-10vox.txt') infile_list = [fname1, fname2] # fields = apm.load_infile_list(infile_list) assert fields def test_calc_percentile(self): r""" Sends a test array to the calc percentile function """ data_list = list(range(100)) val = apm.calc_percentile(99, data_list) assert val == 99 def test_calc_percentile_num(self): r""" Sends a test array to the calc percentile function """ data_list = list(range(100)) val = apm.calc_percentile_num(50, data_list, last=False) assert val*100 == 50 val = apm.calc_percentile_num(50, data_list, last=True) assert val*100 == 51 def test_get_data_vect(self): r""" Tests extraction of a vector from a data array """ data = sp.arange(100) data = data.reshape(10, 10) # vect = apm.get_data_vect(data, 'x', 0) assert sp.all(vect == data[0, :]) vect = apm.get_data_vect(data, 'x', 11) assert sp.all(vect == data[9, :]) vect = apm.get_data_vect(data, 'z', 0) assert sp.all(vect == data[:, 0]) vect = apm.get_data_vect(data, 'z', 11) assert sp.all(vect == data[:, 9]) # with pytest.raises(ValueError): apm.get_data_vect(data, 'y')
stadelmanma/netl-AP_MAP_FLOW
test/unit/TestCore.py
Python
gpl-3.0
7,963
[ "VTK" ]
b2eecbe7b2c70d7c3910446d4cbdeabba566f7ad0dac1cfe8509ae8eda360a12
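The tests in the record above rely on module-level names FIXTURE_DIR and TEMP_DIR that the excerpt never defines; they presumably come from the repo's test configuration. A minimal sketch of how a pytest setup might provide them, assuming fixtures live beside the tests; both paths are illustrative and not the repo's own:

# Hypothetical definitions for the assumed FIXTURE_DIR/TEMP_DIR names used above.
import os
import tempfile

FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')  # assumed location
TEMP_DIR = tempfile.mkdtemp(prefix='apmapflow-tests-')  # throwaway scratch directory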
# -*- coding: utf-8 -*- # vim: ai ts=4 sts=4 et sw=4 nu import os import sys import json import platform import tempfile import threading import multiprocessing from collections import OrderedDict import gi import pytz import iso639 import tzlocal import requests import humanfriendly import webbrowser from backend.content import ( get_expanded_size, get_collection, get_required_image_size, get_content, isremote, ) import data import sd_card_info from util import relpathto from util import get_cache from util import CLILogger from util import check_user_inputs from version import get_version_str from util import b64encode, b64decode from util import get_free_space_in_dir from util import get_adjusted_image_size from util import split_proxy, save_prefs from backend.catalog import get_catalogs from util import CancelEvent, ProgressHelper from run_installation import run_installation from backend.util import sd_has_single_partition, flash_image_with_etcher from backend.mount import open_explorer_for_imdisk from util import human_readable_size, ONE_GB, ONE_MiB from backend.cache import clean_cache, reset_cache from backend.cache import get_cache_size_and_free_space from backend.download import get_proxies, test_connection gi.require_version("Gtk", "3.0") from gi.repository import Gtk, Gdk, GLib, GdkPixbuf, GObject VALID_RGBA = Gdk.RGBA(0.0, 0.0, 0.0, 0.0) INVALID_RGBA = Gdk.RGBA(1, 0.5, 0.5, 1.0) mainloop = None def quit(*args, **kwargs): global mainloop mainloop.quit() def run(): global mainloop mainloop = GObject.MainLoop() try: mainloop.run() except KeyboardInterrupt: print("\nKeyboardInterrupt, exiting.") quit() class ShortDialog(Gtk.Dialog): def __init__(self, parent, buttons, msg): Gtk.Dialog.__init__(self, "Kiwix Hotspot", parent, 0, buttons) self.set_default_size(150, 100) label = Gtk.Label(msg) box = self.get_content_area() box.add(label) self.show_all() def hide_on_delete(widget, event): widget.hide() return True class Logger(ProgressHelper): def __init__(self, component): super(Logger, self).__init__() self.component = component self.text_buffer = self.component.run_text_view.get_buffer() self.step_tag = self.text_buffer.create_tag("step", foreground="blue") self.err_tag = self.text_buffer.create_tag("err", foreground="red") self.succ_tag = self.text_buffer.create_tag("succ", foreground="green") self.stg_tag = self.text_buffer.create_tag("stg", foreground="purple") self.run_pulse() @property def on_tty(self): return False def flash(self, line): pass def ascii_progressbar(self, current, total): width = 60 avail_dots = width - 2 if total == -1: line = "unknown size" elif current >= total: line = "[" + "." * avail_dots + "] 100%\n" else: ratio = min(float(current) / total, 1.0) shaded_dots = min(int(ratio * avail_dots), avail_dots) percent = min(int(ratio * 100), 100) line = ( "[" + "." 
* shaded_dots + " " * (avail_dots - shaded_dots) + "] " + str(percent) + "%\r" ) if getattr(self, "_last_progress_line", None) != line: self.raw_std(line) setattr(self, "_last_progress_line", line) def step(self, step): GLib.idle_add(self.main_thread_step, step) def err(self, err): GLib.idle_add(self.main_thread_err, err) def succ(self, succ): GLib.idle_add(self.main_thread_succ, succ) def raw_std(self, std): GLib.idle_add(self.main_thread_raw_std, std, end="") def std(self, std, end=None): GLib.idle_add(self.main_thread_std, std, end) def complete(self): GLib.idle_add(self.main_thread_complete) def failed(self, error): GLib.idle_add(self.main_thread_failed, error) def main_thread_text(self, text, end="\n", tag=None): if not isinstance(text, str): text = str(text) text = self._add_time(text) text += end text_iter = self.text_buffer.get_end_iter() if tag is None: self.text_buffer.insert(text_iter, text) else: self.text_buffer.insert_with_tags(text_iter, text, tag) def main_thread_step(self, text): self.main_thread_text("--> {}".format(text), "\n", self.step_tag) self._update_progress_text(text) def main_thread_err(self, text): self.main_thread_text(text, "\n", self.err_tag) def main_thread_succ(self, text): self.main_thread_text(text, "\n", self.succ_tag) def main_thread_raw_std(self, text): self.main_thread_text(text) def main_thread_std(self, text, end=None): self.main_thread_text(text, end if end is not None else "\n") def _update_progress_text(self, text): self.component.run_progressbar.set_text(text) def update(self): GLib.idle_add(self.update_gui) def update_gui(self): # show text progress in console self.main_thread_text( "[STAGE {nums}: {name} - {pc:.0f}%]".format( nums=self.stage_numbers, name=self.stage_name, pc=self.get_overall_progress() * 100, ), tag=self.stg_tag, ) # update overall percentage on window title self.component.run_window.set_title( "Kiwix Hotspot ({:.0f}%)".format(self.get_overall_progress() * 100) ) # update stage name and number (Stage x/y) self.component.run_step_label.set_markup( "<b>Stage {nums}</b>: {name}".format( nums=self.stage_numbers, name=self.stage_name ) ) # update the progress bar according to the stage's progress if self.stage_progress is not None: self.component.run_progressbar.set_inverted(False) self.component.run_progressbar.set_fraction(self.stage_progress) else: # animate the stage progress bar to show an unknown progress self.run_pulse() def main_thread_complete(self): super(Logger, self).complete() self.main_thread_succ("Installation succeded.") self.component.run_step_label.set_markup("<b>Done.</b>") self.progress(1) def main_thread_failed(self, error): super(Logger, self).failed() self.step("Failed: {}".format(error[0:50])) self.err("Installation failed: {}".format(error)) self.progress(1) def run_pulse(self): """ used for progress bar animation (unknown progress) """ self._update_progress_text("") self.timeout_id = GObject.timeout_add(50, self.on_timeout) def on_timeout(self): """ used for progress bar animation (unknown progress) """ if self.stage_progress is None: new_value = self.component.run_progressbar.get_fraction() + 0.035 # inverse direction if end reached if new_value > 1: new_value = 0 # switch from left-to-right to right-to-left at bounds self.component.run_progressbar.set_inverted( not self.component.run_progressbar.get_inverted() ) self.component.run_progressbar.set_fraction(new_value) return True # returns True so it continues to get called class Component: def __init__(self, builder): self.builder = builder def 
__getattr__(self, key): """Allow UI builder widgets to be accessed as self.widgetname""" widget = self.builder.get_object(key) if widget: setattr(self, key, widget) return widget raise AttributeError(key) def validate_label(label, condition): if condition: label.modify_bg(Gtk.StateFlags.NORMAL) else: label.modify_bg(Gtk.StateFlags.NORMAL, INVALID_RGBA.to_color()) class Application: def __init__(self): self.catalogs = None builder = Gtk.Builder() builder.add_from_file(data.ui_glade) self.component = Component(builder) self.cancel_event = CancelEvent() self.logger = Logger(self.component) # main window self.component.window.connect("delete-event", quit) # gtk file filters (macOS fix) self.component.favicon_filter.set_name("Favicon (ICO, PNG)") # opt self.component.favicon_filter.add_pattern("*.png") self.component.favicon_filter.add_pattern("*.ico") self.component.favicon_chooser.add_filter(self.component.favicon_filter) self.component.logo_filter.set_name("Logo (PNG)") # opt self.component.logo_filter.add_pattern("*.png") self.component.logo_chooser.add_filter(self.component.logo_filter) self.component.css_filter.set_name("CSS File") # opt self.component.css_filter.add_pattern("*.css") self.component.css_chooser.add_filter(self.component.css_filter) self.component.edupi_resources_filter.set_name("ZIP File") # opt self.component.edupi_resources_filter.add_pattern("*.zip") self.component.edupi_resources_chooser.add_filter( self.component.edupi_resources_filter ) # menu bar self.component.menu_quit.connect( "activate", lambda widget: self.component.window.close() ) self.component.menu_about.connect("activate", self.activate_menu_about) self.component.menu_load_config.connect( "activate", self.activate_menu_config, False ) self.component.menu_save_config.connect( "activate", self.activate_menu_config, True ) self.component.menu_help.connect("activate", self.activate_menu_help) # imdisk menu is windows only if sys.platform == "win32": self.component.menu_imdisk.set_visible(True) self.component.menu_imdisk.connect("activate", self.activate_menu_imdisk) # proxies self.component.menu_proxies.connect( "activate", lambda widget: self.component.proxies_dialog.show() ) self.component.reset_proxies_button.connect( "clicked", self.reset_proxies_button_clicked ) self.component.save_proxies_button.connect( "clicked", self.save_proxies_button_clicked ) self.component.test_proxies_button.connect( "clicked", self.test_proxies_button_clicked ) self.component.proxies_dialog.connect( "show", lambda widget: self._set_proxies_entries() ) self.component.proxies_dialog.connect("delete-event", hide_on_delete) # etcher self.component.menu_etcher.connect("activate", self.activate_menu_etcher) # cache self.component.clean_cache_button.connect("clicked", self.activate_menu_cache) # sd clean self.component.clean_sd_button.connect("clicked", self.activate_sd_clean) # wifi password self.component.wifi_password_switch.connect( "notify::active", lambda switch, state: self.component.wifi_password_revealer.set_reveal_child( not switch.get_active() ), ) # edupi resources self.component.edupi_switch.connect( "notify::active", lambda switch, state: self.component.edupi_resources_revealer.set_reveal_child( switch.get_active() ), ) # ideascube language for code, language in data.hotspot_languages: self.component.language_tree_store.append([code, language]) renderer = Gtk.CellRendererText() self.component.language_combobox.pack_start(renderer, True) self.component.language_combobox.add_attribute(renderer, "text", 1) # SD sizes for 
image size for ngb in data.sdcard_sizes: self.component.sizes_tree_store.append([str(ngb), "{} GB".format(ngb)]) renderer = Gtk.CellRendererText() self.component.size_combobox.pack_start(renderer, True) self.component.size_combobox.add_attribute(renderer, "text", 1) renderer = Gtk.CellRendererText() self.component.timezone_combobox.pack_start(renderer, True) self.component.timezone_combobox.add_attribute(renderer, "text", 0) # build-path self.component.build_path_chooser.connect("file-set", self.changed_build_path) # output self.component.sd_card_combobox.connect( "changed", lambda _: self.update_free_space() ) self.component.sd_card_combobox.connect( "changed", lambda w: self.on_sdcard_selection_change(w) ) self.component.sd_card_refresh_button.connect( "clicked", self.sd_card_refresh_button_clicked ) self.component.output_stack.connect( "notify::visible-child", lambda switch, state: self.update_free_space() ) self.component.size_combobox.connect( "changed", lambda _: self.update_free_space() ) types = [info["typ"] for info in sd_card_info.informations] self.component.sd_card_list_store = Gtk.ListStore(*types) self.component.sd_card_combobox.set_model(self.component.sd_card_list_store) for counter in range(0, sd_card_info.visible_informations): cell_renderer = Gtk.CellRendererText() self.component.sd_card_combobox.pack_start(cell_renderer, True) self.component.sd_card_combobox.add_attribute( cell_renderer, "text", counter ) # about dialog self.component.about_dialog.set_logo( GdkPixbuf.Pixbuf.new_from_file_at_scale(data.pibox_logo, 200, -1, True) ) self.component.about_dialog.set_version(get_version_str()) # done window self.component.done_window_ok_button.connect( "clicked", lambda widget: self.component.done_window.hide() ) self.component.done_window.connect("delete-event", hide_on_delete) # space error window self.component.space_error_window_ok_button.connect( "clicked", self.space_error_window_ok_button_clicked ) self.component.space_error_window.connect("delete-event", hide_on_delete) # run window self.component.run_installation_button.connect( "clicked", self.run_installation_button_clicked ) self.component.run_window.connect("delete-event", self.run_window_delete_event) self.component.run_text_view.get_buffer().connect( "modified-changed", self.run_text_view_scroll_down ) self.component.run_quit_button.connect("clicked", self.run_quit_button_clicked) self.component.run_abort_button.connect( "clicked", self.run_abort_button_clicked ) self.component.run_copy_log_to_clipboard_button.connect( "clicked", self.run_copy_log_to_clipboard_button_clicked ) self.component.run_new_install_button.connect( "clicked", self.run_new_install_button_clicked ) # zim content self.component.zim_choose_content_button.connect( "clicked", self.zim_choose_content_button_clicked ) self.component.zim_list_store = Gtk.ListStore( str, # key str, # name str, # url str, # description str, # formatted_size object, # languages str, # type str, # version bool, # selected str, # size bool, # its language is selected Gdk.RGBA, # background color ) self.component.zim_list_store.set_sort_column_id(1, Gtk.SortType.ASCENDING) def get_project_size(name, lang): langs = ["fr", "en"] if name == "aflatoun" else [lang] return get_expanded_size( get_collection(**{"{}_languages".format(name): langs}), add_margin=False ) # kalite for lang, button in self.iter_kalite_check_button(): button.set_label( "{} ({})".format( button.get_label(), human_readable_size(get_project_size("kalite", lang)), ) ) button.connect("toggled", lambda 
button: self.update_free_space()) # wikifundi for lang, button in self.iter_wikifundi_check_button(): button.set_label( "{} ({})".format( button.get_label(), human_readable_size(get_project_size("wikifundi", lang)), ) ) button.connect("toggled", lambda button: self.update_free_space()) # aflatoun self.component.aflatoun_switch.connect( "notify::active", lambda switch, state: self.update_free_space() ) self.component.aflatoun_label.set_label( "{} ({})".format( self.component.aflatoun_label.get_label(), human_readable_size(get_project_size("aflatoun", lang)), ) ) # edupi self.component.edupi_switch.connect( "notify::active", lambda switch, state: self.update_free_space() ) self.component.edupi_label.set_label( "{} ({})".format( self.component.edupi_label.get_label(), human_readable_size(10 * ONE_MiB), ) ) self.component.edupi_resources_url_entry.connect( "changed", lambda _: self.update_free_space() ) self.component.edupi_resources_chooser.connect( "file-set", lambda _: self.update_free_space() ) # nomad self.component.nomad_switch.connect( "notify::active", lambda switch, state: self.update_free_space() ) self.component.nomad_label.set_label( "{} ({})".format( self.component.nomad_label.get_label(), human_readable_size( get_expanded_size(get_collection(nomad=True), add_margin=False) ), ) ) # mathews self.component.mathews_switch.connect( "notify::active", lambda switch, state: self.update_free_space() ) self.component.mathews_label.set_label( "{} ({})".format( self.component.mathews_label.get_label(), human_readable_size( get_expanded_size(get_collection(mathews=True), add_margin=False) ), ) ) self.refresh_disk_list() self.reset_config() # will calculate free space self.component.window.show() self.catalogs_thread = threading.Thread(target=self.download_catalogs) self.catalogs_thread.start() def ensure_connection(self): """ test and return Connection Status. Display Error of failure """ conn_working, failed_protocol = test_connection() if not conn_working: self.display_error_message( "Internet Connection Failed ({})".format(failed_protocol), "Unable to contact Kiwix Server.\nPlease check your Internet Connection and/or Proxy Settings (from the File menu).", self.component.window, ) return False return True def download_catalogs(self): self.catalogs = get_catalogs(CLILogger()) return self.catalogs is not None def ensure_catalogs(self): if self.catalogs_thread.is_alive(): # let's wait for the catalog thread to complete self.catalogs_thread.join() if self.catalogs is None: if not self.download_catalogs(): self.display_error_message( title="Catalogs Download Failed", message="Could not download the Content Catalogs. 
Please check your Internet connection and/or Proxy Settings (File menu).", parent=self.component.window, ) return False # now that we have the catalogs, build the ZIM store if not already done if not len(self.component.zim_list_store): self.build_zim_store() return True def build_zim_store(self): all_languages = set() for one_catalog in self.catalogs: for (key, value) in one_catalog["all"].items(): name = value["name"] url = value["url"] description = value.get("description") or "none" size = str(value["size"]) languages = [] for iso_code in (value.get("language") or "Unknown language").split( "," ): try: languages.append(iso639.languages.get(part3=iso_code).name) except KeyError: pass languages = set(languages) typ = value["type"] version = str(value["version"]) formatted_size = human_readable_size(int(size)) self.component.zim_list_store.append( [ key, name, url, description, formatted_size, languages, typ, version, False, size, True, VALID_RGBA, ] ) all_languages |= languages self.component.zim_language_list_store = Gtk.ListStore(str) self.component.zim_language_list_store.set_sort_column_id( 0, Gtk.SortType.ASCENDING ) for language in all_languages: self.component.zim_language_list_store.append([language]) # zim window self.component.zim_window_done_button.connect( "clicked", self.zim_done_button_clicked ) self.component.zim_window.connect("delete-event", hide_on_delete) self.component.zim_tree_view.connect( "row-activated", self.available_zim_clicked ) self.component.choosen_zim_tree_view.connect( "row-activated", self.choosen_zim_clicked ) # zim window available tree view self.component.zim_tree_view.set_model(self.component.zim_list_store) renderer_text = Gtk.CellRendererText() column_text = Gtk.TreeViewColumn("Name", renderer_text, text=1) self.component.zim_tree_view.append_column(column_text) column_text = Gtk.TreeViewColumn("Size", renderer_text, text=4) self.component.zim_tree_view.append_column(column_text) column_text = Gtk.TreeViewColumn("Description", renderer_text, text=3) self.component.zim_tree_view.append_column(column_text) column_text.add_attribute(renderer_text, "cell_background_rgba", 11) zim_filter = self.component.zim_list_store.filter_new() zim_filter.set_visible_func(self.zim_filter_func) self.component.zim_tree_view.set_model(zim_filter) # zim window choosen tree view self.component.choosen_zim_tree_view.set_model(self.component.zim_list_store) renderer_text = Gtk.CellRendererText() column_text = Gtk.TreeViewColumn("Name", renderer_text, text=1) self.component.choosen_zim_tree_view.append_column(column_text) column_text = Gtk.TreeViewColumn("Size", renderer_text, text=4) self.component.choosen_zim_tree_view.append_column(column_text) column_text = Gtk.TreeViewColumn("Description", renderer_text, text=3) self.component.choosen_zim_tree_view.append_column(column_text) # language tree view renderer_text = Gtk.CellRendererText() column_text = Gtk.TreeViewColumn("Language", renderer_text, text=0) self.component.zim_language_tree_view.append_column(column_text) self.component.zim_language_tree_view.get_selection().set_mode( Gtk.SelectionMode(3) ) self.component.zim_language_tree_view.set_model( self.component.zim_language_list_store ) self.component.zim_language_tree_view.get_selection().select_all() self.component.zim_language_tree_view.get_selection().connect( "changed", self.zim_language_selection_changed ) # apply chosen zim filter choosen_zim_filter = self.component.zim_list_store.filter_new() choosen_zim_filter.set_visible_func(self.choosen_zim_filter_func) 
self.component.choosen_zim_tree_view.set_model(choosen_zim_filter) self.update_free_space() def reset_config(self): """ restore UI to its initial (non-configured) state """ # name self.component.project_name_entry.set_text("Kiwix") # language index = -1 for i, (code, language) in enumerate(data.hotspot_languages): if code == "en": index = i self.component.language_combobox.set_active(index) # timezone default_id = -1 local_tz = tzlocal.get_localzone() for id, timezone in enumerate(pytz.common_timezones): if timezone == "UTC" and default_id == -1: default_id = id if pytz.timezone(timezone) == local_tz: default_id = id self.component.timezone_tree_store.append(None, [timezone]) self.component.timezone_combobox.set_active(default_id) # wifi self.component.wifi_password_switch.set_active(True) self.component.wifi_password_entry.set_text("hotspot-password") # admin account self.component.admin_account_login_entry.set_text("admin") self.component.admin_account_pwd_entry.set_text("admin-password") # branding for key in ("logo", "favicon", "css"): getattr(self.component, "{}_chooser".format(key)).unselect_all() # build_dir self.component.build_path_chooser.unselect_all() # size self.component.size_combobox.set_active(0) # content for key in ("kalite", "wikifundi"): for lang, button in getattr(self, "iter_{}_check_button".format(key))(): button.set_active(False) for key in ("edupi", "aflatoun", "nomad", "mathews"): getattr(self.component, "{}_switch".format(key)).set_active(False) # edupi resources self.component.edupi_resources_url_entry.set_text("") self.component.edupi_resources_chooser.unselect_all() # static contents for index, zim in enumerate(self.component.zim_list_store): if zim[8]: self.component.zim_list_store[index][8] = False self.component.choosen_zim_tree_view.set_model(self.component.zim_list_store) choosen_zim_filter = self.component.zim_list_store.filter_new() choosen_zim_filter.set_visible_func(self.choosen_zim_filter_func) self.component.choosen_zim_tree_view.set_model(choosen_zim_filter) self.update_free_space() def iter_kalite_check_button(self): return [ ("fr", self.component.kalite_fr_check_button), ("en", self.component.kalite_en_check_button), ("es", self.component.kalite_es_check_button), ] def iter_wikifundi_check_button(self): return [ ("fr", self.component.wikifundi_fr_check_button), ("en", self.component.wikifundi_en_check_button), ] def space_error_window_ok_button_clicked(self, widget): self.component.space_error_window.hide() def activate_menu_about(self, widget): response = self.component.about_dialog.run() if ( response == Gtk.ResponseType.DELETE_EVENT or response == Gtk.ResponseType.CANCEL ): self.component.about_dialog.hide() def activate_menu_help(self, widget): webbrowser.open(data.help_url) def _set_proxies_entries(self, proxies=None): """ fill proxies_dialog entries with proxies conf (passed or prefs) """ proxies = proxies if proxies is not None else get_proxies() http_loc, http_port = split_proxy(proxies.get("http", "")) self.component.http_proxy_entry.set_text(http_loc) self.component.http_proxy_port_entry.set_text(http_port) https_loc, https_port = split_proxy(proxies.get("https", "")) self.component.https_proxy_entry.set_text(https_loc) self.component.https_proxy_port_entry.set_text(https_port) def _get_proxies_entries(self): """ return proxies conf from the proxies_dialog entries """ http_proxy = self.component.http_proxy_entry.get_text().strip() http_proxy_port = self.component.http_proxy_port_entry.get_text().strip() https_proxy = 
self.component.https_proxy_entry.get_text().strip() https_proxy_port = self.component.https_proxy_port_entry.get_text().strip() proxies = {} if http_proxy and http_proxy_port: proxies.update( { "http": "http://{netloc}:{port}".format( netloc=http_proxy, port=http_proxy_port ) } ) if https_proxy and https_proxy_port: proxies.update( { "https": "http://{netloc}:{port}".format( netloc=https_proxy, port=https_proxy_port ) } ) return proxies def test_proxies_button_clicked(self, widget): """ test connection using the (non-saved) proxy conf in the proxies dialog """ proxies = self._get_proxies_entries() conn_working, failed_protocol = test_connection(proxies=proxies) if conn_working: mtype = Gtk.MessageType.INFO title = "Connection Successful" message = ( "We could reach Kiwix server using those settings.\n" "You can now save them and pursue." ) else: mtype = Gtk.MessageType.ERROR title = "Connection Failed ({})".format(failed_protocol) message = ( "Unable to contact Kiwix server using those Settings.\n" "Either your Internet Connection is not working or those settings are incorrect." ) msg_box = Gtk.MessageDialog( self.component.proxies_dialog, None, mtype, Gtk.ButtonsType.OK, title ) msg_box.format_secondary_text(message) msg_box.set_modal(True) msg_box.run() msg_box.destroy() def reset_proxies_button_clicked(self, widget): """ set proxies conf and prefs to not use proxy at all """ # reset UI self._set_proxies_entries({}) # save prefs and reload proxies save_prefs({}, auto_reload=True) get_proxies(load_env=False, force_reload=True) # close dialog self.component.proxies_dialog.hide() def save_proxies_button_clicked(self, widget): """ save in prefs and use proxies conf from proxies_dialog """ proxies = self._get_proxies_entries() prefs = {} if proxies.get("http"): prefs.update({"HTTP_PROXY": proxies.get("http")}) if proxies.get("https"): prefs.update({"HTTPS_PROXY": proxies.get("https")}) # save prefs and reload proxies save_prefs(prefs, auto_reload=True) get_proxies(load_env=False, force_reload=True) # reflect changes on UI self._set_proxies_entries() # close dialog self.component.proxies_dialog.hide() def activate_menu_imdisk(self, widget): class ImDiskDialog(Gtk.Dialog): def __init__(self, parent): Gtk.Dialog.__init__( self, "Install or Uninstall ImDisk Manually", parent, 0, ( Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK, ), ) self.set_default_size(500, 300) label = Gtk.Label() label.set_markup( "\nBy selecting OK bellow, you will be directed to the " "ImDisk installation folder.\n" "<b>Right-click on <i>install.cmd</i></b> and choose to " "<u>Run as Administrator</u>.\n" ) label.set_alignment(0, 0.5) label2 = Gtk.Label() label2.set_markup( "\nYou can also uninstall it from that folder by " "doing the same with <i>uninstall_imdisk.cmd</i>.\n" ) label2.set_alignment(0, 0.5) image = Gtk.Image.new_from_file( os.path.join(data.data_dir, "imdisk.png") ) box = self.get_content_area() box.add(label) box.add(image) box.add(label2) self.show_all() dialog = ImDiskDialog(self.component.window) if dialog.run() == Gtk.ResponseType.OK: open_explorer_for_imdisk(self.logger) dialog.close() def activate_menu_etcher(self, widget): class EtcherDialog(Gtk.Dialog): DL_CODE = 2111 def __init__(self, parent): Gtk.Dialog.__init__( self, "Use Etcher to Flash your SD-card", parent, 0, ( "Download Latest Etcher", self.DL_CODE, "Visit Website", Gtk.ResponseType.OK, Gtk.STOCK_OK, Gtk.ResponseType.CANCEL, ), ) self.set_default_size(500, 300) label = Gtk.Label() label.set_markup( "\nUse 
<b>Etcher</b> to flash your image onto your SD-card" ". It will also <b>validate</b> that the SD-card " "has been <b>successfuly written</b>.\n" "You can even burn the same image " "on <b>several SD-cards at once</b>.\n" ) label.set_alignment(0, 0.5) label2 = Gtk.Label() label2.set_markup( "\nPlease Download and Run <b>Etcher</b> separately.\n" ) label2.set_alignment(0, 0.5) image = Gtk.Image.new_from_file( os.path.join(data.data_dir, "etcher.gif") ) box = self.get_content_area() box.add(label) box.add(image) box.add(label2) self.show_all() dialog = EtcherDialog(self.component.window) ret = dialog.run() if ret == EtcherDialog.DL_CODE: try: req = requests.get( "https://img.shields.io/github/release" "/resin-io/etcher.json" ) version = req.json().get("value") base_url = ( "https://github.com/resin-io/etcher/releases/" "download/{}/".format(version) ) def get_fname(): if sys.platform == "linux": if platform.machine() == "x86_64": return "etcher-electron-{v}-x86_64.AppImage" return "etcher-electron-{v}-i386.AppImage" elif sys.platform == "win32": if platform.machine() == "AMD64": return "Etcher-Portable-{v}-x64.exe" return "Etcher-Portable-{v}-x86.exe" elif sys.platform == "darwin": return "Etcher-{v}.dmg" raise NotImplementedError("platform not supported") etcher_dl_url = base_url + get_fname().format(v=version[1:]) except Exception as exp: etcher_dl_url = data.etcher_url webbrowser.open(etcher_dl_url) elif ret == Gtk.ResponseType.OK: webbrowser.open(data.etcher_url) dialog.close() def changed_build_path(self, widget): """ display Clean cache button only if build-path is set """ self.component.clean_cache_button.set_visible( bool(self.component.build_path_chooser.get_filename().strip()) ) def activate_menu_cache(self, widget): build_folder = self.component.build_path_chooser.get_filename() cache_folder = get_cache(build_folder) cache_size, nb_files, free_space = get_cache_size_and_free_space( build_folder, cache_folder ) class CacheDialog(Gtk.Dialog): WIPE_CODE = 2111 def __init__(self, parent): Gtk.Dialog.__init__( self, "Reclaim space by cleaning-up your cache", parent, 0, ( "Wipe Cache (quick)", self.WIPE_CODE, "Clean Cache (slow)", Gtk.ResponseType.OK, "Close", Gtk.ResponseType.CANCEL, ), ) self.parent = parent self.set_default_size(300, 100) label = Gtk.Label() label.set_markup( "\nKiwix Hotspot maintains <b>a cache of all downloaded files</b> " "and reuse them on future runs.\n" "\nYou can either <b>wipe the cache completely</b> " "or <b>only remove obsolete files</b>.\n" "Obsoletes files are previous version of ZIMs or content packs.\n\n" "Wiping is almost instantaneous.\n" "Cleaning takes several minutes as " "it analyzes files to determine which ones should be kept.\n\n" "Your cache folder is: <i>{cache}</i>.\n" "<u>Cache Disk Usage</u>: <b>{du}</b> ({nb} files)\n" "<u>Free Space</u>: <b>{df}</b>\n".format( cache=cache_folder, du=human_readable_size(cache_size), df=human_readable_size(free_space), nb=nb_files, ) ) label.set_alignment(0, 0.5) self.thread = None self.run_progressbar = Gtk.ProgressBar() self.cancel_button = Gtk.Button("Cancel") self.cancel_button.connect("clicked", self.stop_cache_operation) box = self.get_content_area() box.add(label) box.add(self.run_progressbar) box.add(self.cancel_button) box.add(Gtk.Label("")) # spacer self.show_all() self.run_progressbar.set_visible(False) self.cancel_button.set_visible(False) def start_cache_operation(self, is_wipe): # do nothing if the thread is running if self.thread is not None and self.thread.is_alive(): return # show 
progress bar and cancel button self.run_progressbar.set_visible(True) self.cancel_button.set_label( "Cancel {}".format("Wiping" if is_wipe else "Cleaning...") ) self.cancel_button.set_visible(True) # start progress bar animation self.timeout_id = GObject.timeout_add(50, self.on_timeout) self.thread = multiprocessing.Process( target=reset_cache if is_wipe else clean_cache, args=(CLILogger(), build_folder, cache_folder), ) self.thread.start() def on_timeout(self): # display post-thread dialog on cancelled thread if self.thread is not None and not self.thread.is_alive(): self.display_post_cache_operation_dialog() return False elif self.thread is None: # stop progress anim if thread not running return False new_value = self.run_progressbar.get_fraction() + 0.035 # inverse direction if end reached if new_value > 1: new_value = 0 # switch from left-to-right to right-to-left at bounds self.run_progressbar.set_inverted( not self.run_progressbar.get_inverted() ) self.run_progressbar.set_fraction(new_value) return True # returns True so it continues to get called def stop_cache_operation(self, *args, **kwargs): if self.thread is not None and self.thread.is_alive(): self.thread.terminate() self.thread = None self.cancel_button.set_visible(False) self.run_progressbar.set_visible(False) self.close() def display_post_cache_operation_dialog(self): msg_box = Gtk.MessageDialog( self.parent, None, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "Cache Operation Completed", ) cache_size, nb_files, free_space = get_cache_size_and_free_space( build_folder, cache_folder ) content = ( "Cache folder: {cache}.\n" "Cache Disk Usage: {du} ({nb} files)\n" "Free Space: {df}\n\n".format( cache=cache_folder, du=human_readable_size(cache_size), df=human_readable_size(free_space), nb=nb_files, ) ) msg_box.format_secondary_text(content) msg_box.set_modal(True) msg_box.run() msg_box.destroy() self.close() dialog = CacheDialog(self.component.window) ret = dialog.run() if ret == CacheDialog.WIPE_CODE: dialog.start_cache_operation(True) elif ret == Gtk.ResponseType.OK: dialog.start_cache_operation(False) else: dialog.close() def activate_sd_clean(self, widget): sd_card = self.get_sd_card() class SDCleanDialog(Gtk.Dialog): def __init__(self, parent, parent_ui): Gtk.Dialog.__init__( self, "Wipe your SD-card clean before for installation", parent, 0, ( "Wipe {}".format(sd_card), Gtk.ResponseType.OK, "Close", Gtk.ResponseType.CANCEL, ), ) self.parent = parent self.parent_ui = parent_ui self.set_default_size(300, 100) label = Gtk.Label() label.set_markup( "\nFor Kiwix Hotspot to work properly,\n" "you need your SD-card to be cleaned before starting,\n" "hence having just a single FAT-like partition.\n\n" "This process you only take a few minutes.\n" "If this does not end within 10mn,\n" "cancel-it and try clean your SD-card using a different tool.\n\n" ) label.set_alignment(0, 0.5) self.thread = None self.retcode = multiprocessing.Value("i", -1) self.run_progressbar = Gtk.ProgressBar() self.cancel_button = Gtk.Button("Cancel") self.cancel_button.connect("clicked", self.stop_clean_operation) box = self.get_content_area() box.add(label) box.add(self.run_progressbar) box.add(self.cancel_button) box.add(Gtk.Label("")) # spacer self.show_all() self.run_progressbar.set_visible(False) self.cancel_button.set_visible(False) def start_clean_operation(self): # do nothing if the thread is running if self.thread is not None and self.thread.is_alive(): return # show progress bar and cancel button self.run_progressbar.set_visible(True) 
self.cancel_button.set_label("Cancel Wiping") self.cancel_button.set_visible(True) # start progress bar animation self.timeout_id = GObject.timeout_add(50, self.on_timeout) self.thread = multiprocessing.Process( target=flash_image_with_etcher, args=( os.path.join(data.data_dir, "mbr.img"), sd_card, self.retcode, ), ) self.thread.start() def on_timeout(self): # display post-thread dialog on cancelled thread if self.thread is not None and not self.thread.is_alive(): self.display_post_clean_operation_dialog() return False elif self.thread is None: # stop progress anim if thread not running return False new_value = self.run_progressbar.get_fraction() + 0.035 # inverse direction if end reached if new_value > 1: new_value = 0 # switch from left-to-right to right-to-left at bounds self.run_progressbar.set_inverted( not self.run_progressbar.get_inverted() ) self.run_progressbar.set_fraction(new_value) return True # returns True so it continues to get called def stop_clean_operation(self, *args, **kwargs): if self.thread is not None and self.thread.is_alive(): self.thread.terminate() self.thread = None self.cancel_button.set_visible(False) self.run_progressbar.set_visible(False) self.close() def display_post_clean_operation_dialog(self): if self.retcode.value == 0: title = "SD-card Cleaning Completed" content = ( "Your SD-card ({}) has been wiped.\n\n" "You now need to unplug then replug your device.\n" "Once done, come back and hit the refresh button." ).format(sd_card) else: title = "SD-card Cleaning Failed" content = ( "You SD-card HAS NOT been wiped.\n\n" "Please use a different tool to clean it." ) msg_box = Gtk.MessageDialog( self.parent, None, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, title ) msg_box.format_secondary_text(content) msg_box.set_modal(True) msg_box.run() msg_box.destroy() self.close() self.parent_ui.sd_card_refresh_button_clicked("") dialog = SDCleanDialog(self.component.window, self) ret = dialog.run() if ret == Gtk.ResponseType.OK: dialog.start_clean_operation() else: dialog.close() self.sd_card_refresh_button_clicked("") def installation_done(self, error): ok = error is None validate_label(self.component.done_label, ok) if ok: self.component.done_label.set_text("Installation done") else: self.component.done_label.set_text("Installation failed") self.component.done_window.show() self.component.run_install_running_buttons_revealer.set_reveal_child(False) self.component.run_install_done_buttons_revealer.set_reveal_child(True) def run_text_view_scroll_down(self, widget): text_buffer = self.component.run_text_view.get_buffer() text_buffer.set_modified(False) end = text_buffer.get_end_iter() end.backward_line() self.component.run_text_view.scroll_to_iter(end, 0, True, 0, 1.0) def run_window_delete_event(self, widget, path): return True def cancel_run(self): self.cancel_event.cancel() quit() def run_quit_button_clicked(self, widget): self.cancel_run() def run_abort_button_clicked(self, widget): dialog = ShortDialog( self.component.run_window, ( Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK, ), "Are you sure you want to abort the installation ?\nyou will not be able to resume.", ) response = dialog.run() if response == Gtk.ResponseType.OK: self.cancel_run() dialog.destroy() def run_new_install_button_clicked(self, widget): self.logger.reset() self.component.run_window.hide() self.component.window.show() def display_error_message(self, title, message=None, parent=None, flags=None): if parent is None: parent = self.component.window dialog = 
Gtk.MessageDialog( parent, flags, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, title ) if message is not None: dialog.format_secondary_text(message) dialog.set_modal(True) dialog.run() dialog.destroy() def activate_menu_config(self, widget, for_save=False): home_path = os.environ["HomePath" if sys.platform == "win32" else "HOME"] if not for_save and not self.ensure_catalogs(): return def _save(dialog): filename = ( dialog.get_filename() if dialog.get_filename().endswith(".json") else "{}.json".format(dialog.get_filename()) ) try: with open(filename, "w", encoding="utf-8") as fd: json.dump(self.get_config(), fd, indent=4) except Exception: self.display_error_message( "Unable to save JSON configuration to file", "Please check that the path is reachable and writable.", ) def _load(dialog): try: with open(dialog.get_filename(), "r") as fd: config = json.load(fd) except Exception: self.display_error_message( "Unable to load JSON configuration", "Please check that the file is readable " "and in proper JSON format", ) else: self.set_config(config) if for_save: title = "Select a file to save Kiwix Hotspot config to" action = Gtk.FileChooserAction.SAVE on_accept = _save else: title = "Select Kiwix Hotspot config file to load" action = Gtk.FileChooserAction.OPEN on_accept = _load if hasattr(Gtk, "FileChooserNative"): dialog = Gtk.FileChooserNative.new( title, self.component.window, # make it tied to parent and modal action, "OK", "Cancel", ) dialog.set_current_folder(home_path) else: dialog = Gtk.FileChooserDialog( title, self.component.window, action=action, buttons=( Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT, ), ) dialog.set_current_folder(home_path) dialog.set_modal(True) # does not seem to have effect filter_json = Gtk.FileFilter() filter_json.set_name("JSON files") filter_json.add_mime_type("application/json") filter_json.add_pattern("*.json") dialog.add_filter(filter_json) response = dialog.run() if response == Gtk.ResponseType.ACCEPT: on_accept(dialog) dialog.destroy() def set_config(self, config): if not isinstance(config, dict): return # reset all options self.reset_config() # project_name if config.get("project_name") is not None: self.component.project_name_entry.set_text(config.get("project_name")) # language try: value = dict(data.hotspot_languages)[config["language"]] item_tuple = (config["language"], value) item_id = data.hotspot_languages.index(item_tuple) except KeyError: pass else: self.component.language_combobox.set_active(item_id) # timezone try: item_id = [ row_id for row_id, row_data in enumerate(self.component.timezone_tree_store) if row_data[0] == config["timezone"] ][0] except (KeyError, IndexError): pass else: self.component.timezone_combobox.set_active(item_id) # wifi (previous format) if "wifi" in config.keys() and isinstance(config["wifi"], dict): if "protected" in config["wifi"].keys(): self.component.wifi_password_switch.set_active( not bool(config["wifi"]["protected"]) ) if "password" in config["wifi"].keys(): self.component.wifi_password_entry.set_text(config["wifi"]["password"]) # wifi (new format) if "wifi_password" in config.keys(): self.component.wifi_password_switch.set_active( config["wifi_password"] is None ) if config["wifi_password"] is not None: self.component.wifi_password_entry.set_text(config["wifi_password"]) # admin account if "admin_account" in config.keys() and isinstance( config["admin_account"], dict ): for key, arg_key in {"login": "login", "password": "pwd"}.items(): if config["admin_account"].get(key) is not 
None: getattr( self.component, "admin_account_{}_entry".format(arg_key) ).set_text(config["admin_account"][key]) # branding if "branding" in config.keys() and isinstance(config["branding"], dict): for key in ("logo", "favicon", "css"): if config["branding"].get(key) is not None: try: fpath = b64decode( fname=config["branding"][key]["fname"], data=config["branding"][key]["data"], to=tempfile.mkdtemp(), ) except Exception: pass else: getattr(self.component, "{}_chooser".format(key)).set_filename( fpath ) # build_dir if config.get("build_dir") is not None: self.component.build_path_chooser.set_filename( os.path.abspath(config["build_dir"]) ) # size if config.get("size") is not None: try: size = ( humanfriendly.parse_size(config["size"]) if isinstance(config["size"], str) else config["size"] ) size = int(size / ONE_GB) except Exception: size = None if size is not None: sd_size = min( filter(lambda x: x >= size, data.sdcard_sizes), default=data.sdcard_sizes[-1], ) self.component.size_combobox.set_active( data.sdcard_sizes.index(sd_size) ) # content if "content" in config.keys() and isinstance(config["content"], dict): # langs-related contents for key in ("kalite", "wikifundi"): if key in config["content"].keys() and isinstance( config["content"][key], list ): for lang, button in getattr( self, "iter_{}_check_button".format(key) )(): button.set_active(lang in config["content"][key]) # boolean contents (switches) for key in ("edupi", "aflatoun", "nomad", "mathews"): if config["content"].get(key) is not None: getattr(self.component, "{}_switch".format(key)).set_active( config["content"][key] ) # edupi resources if config["content"].get("edupi_resources") is not None: rsc = config["content"].get("edupi_resources") if isremote(rsc): self.component.edupi_resources_url_entry.set_text(str(rsc)) else: self.component.edupi_resources_chooser.set_filename(str(rsc)) if "zims" in config["content"].keys() and isinstance( config["content"]["zims"], list ): nb_zims = len(self.component.zim_tree_view.get_model()) index = 0 nb_selected = 0 while index < (nb_zims - nb_selected): try: zim = self.component.zim_tree_view.get_model()[index] except IndexError: break selected = zim[0] in config["content"]["zims"] self.component.zim_tree_view.get_model()[index][8] = selected if selected: nb_selected += 1 else: index += 1 continue self.update_free_space() def get_config(self): try: language_id = self.component.language_combobox.get_active() language = data.hotspot_languages[language_id][0] except Exception: language = None try: timezone_id = self.component.timezone_combobox.get_active() timezone = self.component.timezone_tree_store[timezone_id][0] except Exception: timezone = None edupi_resources = self.get_edupi_resources() if edupi_resources is not None: if not isremote(edupi_resources): edupi_resources = relpathto(self.get_edupi_resources()) zim_install = [] for zim in self.component.zim_list_store: if zim[8]: zim_install.append(zim[0]) kalite_active_langs = [ lang for lang, button in self.iter_kalite_check_button() if button.get_active() ] wikifundi_active_langs = [ lang for lang, button in self.iter_wikifundi_check_button() if button.get_active() ] try: size = data.sdcard_sizes[self.component.size_combobox.get_active()] * ONE_GB except Exception: size = None branding = {} for key in ("logo", "favicon", "css"): fpath = getattr(self.component, "{}_chooser".format(key)).get_filename() if fpath is not None and os.path.exists(fpath): try: branding[key] = { "fname": os.path.basename(fpath), "data": b64encode(fpath), } 
except Exception: pass return OrderedDict( [ ("project_name", self.component.project_name_entry.get_text()), ("language", language), ("timezone", timezone), ( "wifi_password", None if self.component.wifi_password_switch.get_active() else self.component.wifi_password_entry.get_text(), ), ( "admin_account", OrderedDict( [ ( "login", self.component.admin_account_login_entry.get_text(), ), ( "password", self.component.admin_account_pwd_entry.get_text(), ), ] ), ), ( "build_dir", relpathto(self.component.build_path_chooser.get_filename()), ), ("size", None if size is None else human_readable_size(size, False)), ( "content", OrderedDict( [ ("zims", zim_install), # content-ids list ("kalite", kalite_active_langs), # languages list ("wikifundi", wikifundi_active_langs), # languages list ("aflatoun", self.component.aflatoun_switch.get_active()), ("edupi", self.component.edupi_switch.get_active()), ("edupi_resources", edupi_resources), ("nomad", self.component.nomad_switch.get_active()), ("mathews", self.component.mathews_switch.get_active()), ] ), ), ("branding", branding), ] ) def reset_run_window(self): self.component.run_install_done_buttons_revealer.set_reveal_child(False) self.component.run_install_running_buttons_revealer.set_reveal_child(True) self.component.run_text_view.get_buffer().set_text("") self.logger.update() def run_copy_log_to_clipboard_button_clicked(self, widget): text_buffer = self.component.run_text_view.get_buffer() start = text_buffer.get_start_iter() end = text_buffer.get_end_iter() hidden = True clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) clipboard.set_text(text_buffer.get_text(start, end, hidden), -1) def get_sd_card(self): if self.component.output_stack.get_visible_child_name() == "sd_card": sd_card_id = self.component.sd_card_combobox.get_active() if sd_card_id == -1: return None else: device_index = sd_card_info.get_device_index() return self.component.sd_card_list_store[sd_card_id][device_index] return None def run_installation_button_clicked(self, button): all_valid = True # capture input project_name = self.component.project_name_entry.get_text() language = data.hotspot_languages[ self.component.language_combobox.get_active() ][0] timezone = self.component.timezone_tree_store[ self.component.timezone_combobox.get_active() ][0] wifi_pwd = ( None if self.component.wifi_password_switch.get_state() else self.component.wifi_password_entry.get_text() ) admin_login = self.component.admin_account_login_entry.get_text() admin_pwd = self.component.admin_account_pwd_entry.get_text() zim_install = [zim[0] for zim in self.component.zim_list_store if zim[8]] # validate inputs ( valid_project_name, valid_language, valid_timezone, valid_wifi_pwd, valid_admin_login, valid_admin_pwd, ) = check_user_inputs( project_name=self.component.project_name_entry.get_text(), language=data.hotspot_languages[ self.component.language_combobox.get_active() ][0], timezone=self.component.timezone_tree_store[ self.component.timezone_combobox.get_active() ][0], wifi_pwd=None if self.component.wifi_password_switch.get_state() else self.component.wifi_password_entry.get_text(), admin_login=self.component.admin_account_login_entry.get_text(), admin_pwd=self.component.admin_account_pwd_entry.get_text(), ) # project name validate_label(self.component.project_name_label, valid_project_name) self.component.project_name_constraints_revealer.set_reveal_child( not valid_project_name ) all_valid = all_valid and valid_project_name # language validate_label(self.component.language_label, valid_language) 
all_valid = all_valid and valid_language # timezone validate_label(self.component.timezone_label, valid_timezone) all_valid = all_valid and valid_timezone # wifi passwd validate_label(self.component.wifi_password_label, valid_wifi_pwd) self.component.wifi_password_constraints_revealer.set_reveal_child( not valid_wifi_pwd ) all_valid = all_valid and valid_wifi_pwd # admin account validate_label(self.component.admin_account_login_label, valid_admin_login) validate_label(self.component.admin_account_pwd_label, valid_admin_pwd) self.component.admin_account_login_constraints_revealer.set_reveal_child( not valid_admin_login ) self.component.admin_account_pwd_constraints_revealer.set_reveal_child( not valid_admin_pwd ) all_valid = all_valid and valid_admin_login and valid_admin_pwd output_size = self.get_output_size() sd_card = self.get_sd_card() if self.component.output_stack.get_visible_child_name() == "sd_card": condition = sd_card is not None validate_label(self.component.sd_card_label, condition) all_valid = all_valid and condition # check that SD card has a single partition (clean state) condition = sd_has_single_partition(sd_card, self.logger) validate_label(self.component.sd_card_label, condition) validate_label(self.component.sd_card_error_label, condition) self.component.sd_card_error_label.set_visible(not condition) all_valid = all_valid and condition else: condition = output_size > 0 validate_label(self.component.size_label, condition) all_valid = all_valid and condition condition = self.update_free_space() >= 0 validate_label(self.component.free_space_name_label, condition) all_valid = all_valid and condition kalite_active_langs = [ lang for lang, button in self.iter_kalite_check_button() if button.get_active() ] if len(kalite_active_langs) != 0: kalite = kalite_active_langs else: kalite = None wikifundi_active_langs = [ lang for lang, button in self.iter_wikifundi_check_button() if button.get_active() ] if len(wikifundi_active_langs) != 0: wikifundi = wikifundi_active_langs else: wikifundi = None aflatoun = self.component.aflatoun_switch.get_active() edupi = self.component.edupi_switch.get_active() nomad = self.component.nomad_switch.get_active() mathews = self.component.mathews_switch.get_active() logo = self.component.logo_chooser.get_filename() favicon = self.component.favicon_chooser.get_filename() css = self.component.css_chooser.get_filename() build_dir = self.component.build_path_chooser.get_filename() condition = ( build_dir is not None and os.path.exists(build_dir) and os.path.isdir(build_dir) ) validate_label(self.component.build_path_chooser_label, condition) all_valid = all_valid and condition # Check if there is enough space in build_dir to build image if condition: free_space = get_free_space_in_dir(build_dir) remaining_space = free_space - output_size if remaining_space < 0: self.component.space_error_image_location_label.set_text(build_dir) self.component.space_error_total_space_required_label.set_text( human_readable_size(output_size) ) self.component.space_error_space_available_label.set_text( human_readable_size(free_space) ) self.component.space_error_space_missing_label.set_text( human_readable_size(-remaining_space) ) self.component.space_error_window.show() all_valid = False all_valid = all_valid and self.ensure_connection() and self.ensure_catalogs() if all_valid: def target(): run_installation( name=project_name, timezone=timezone, language=language, wifi_pwd=wifi_pwd, kalite=kalite, wikifundi=wikifundi, aflatoun=aflatoun, edupi=edupi, 
edupi_resources=self.get_edupi_resources(), nomad=nomad, mathews=mathews, zim_install=zim_install, size=output_size, logger=self.logger, cancel_event=self.cancel_event, sd_card=sd_card, logo=logo, favicon=favicon, css=css, build_dir=build_dir, admin_account={"login": admin_login, "pwd": admin_pwd}, done_callback=lambda error: GLib.idle_add( self.installation_done, error ), shrink=True, ) self.component.window.hide() self.reset_run_window() self.component.run_window.show() threading.Thread(target=target, daemon=True).start() def on_sdcard_selection_change(self, button): has_card = self.component.sd_card_combobox.get_active() != -1 self.component.clean_sd_button.set_visible(has_card) # remove warnings on combo change validate_label(self.component.sd_card_label, True) validate_label(self.component.sd_card_error_label, True) self.component.sd_card_error_label.set_visible(False) def sd_card_refresh_button_clicked(self, button): self.refresh_disk_list() self.update_free_space() def refresh_disk_list(self): active_id = self.component.sd_card_combobox.get_active() if active_id != -1: selected_device = self.component.sd_card_list_store[active_id] selected_device = selected_device[sd_card_info.get_device_index()] else: selected_device = None self.component.sd_card_list_store.clear() for id, device in enumerate(sd_card_info.get_iterator()): items = [ info["typ"](device[info["name"]]) for info in sd_card_info.informations ] self.component.sd_card_list_store.append(items) device_name = str(device["device"]).rstrip("\0") if device_name == selected_device: self.component.sd_card_combobox.set_active(id) def zim_choose_content_button_clicked(self, button): if self.ensure_catalogs(): self.component.zim_window.show() def get_edupi_resources(self): local_rsc = self.component.edupi_resources_chooser.get_filename() remote_rsc = self.component.edupi_resources_url_entry.get_text() return (remote_rsc if remote_rsc else local_rsc) or None def get_free_space(self): zim_list = [] for zim in self.component.zim_list_store: if zim[8]: zim_list.append(zim[0]) kalite = [] for lang, button in self.iter_kalite_check_button(): if button.get_active(): kalite.append(lang) wikifundi = [] for lang, button in self.iter_wikifundi_check_button(): if button.get_active(): wikifundi.append(lang) aflatoun = self.component.aflatoun_switch.get_active() edupi = self.component.edupi_switch.get_active() edupi_resources = self.get_edupi_resources() nomad = self.component.nomad_switch.get_active() mathews = self.component.mathews_switch.get_active() collection = get_collection( edupi=edupi, edupi_resources=edupi_resources, nomad=nomad, mathews=mathews, packages=zim_list, kalite_languages=kalite, wikifundi_languages=wikifundi, aflatoun_languages=["fr", "en"] if aflatoun else [], ) try: required_image_size = get_required_image_size(collection) except FileNotFoundError: self.display_error_message( "Free Space Calculation Error", "Unable to calculate free space due to a missing file.\n" "Please, check if the EduPi resources file is still there.", self.component.window, ) return -1 return self.get_output_size() - required_image_size def update_free_space(self): free_space = self.get_free_space() human_readable_free_space = human_readable_size(free_space) self.component.free_space_label1.set_text(human_readable_free_space) self.component.free_space_label2.set_text(human_readable_free_space) condition = free_space >= 0 validate_label(self.component.free_space_label1, condition) validate_label(self.component.free_space_label2, condition) # size 
should be at least base_image size size = self.get_output_size() validate_label( self.component.size_combobox, size >= get_content("hotspot_master_image")["expanded_size"], ) for row in self.component.zim_list_store: if free_space - int(row[9]) >= 0: row[11] = VALID_RGBA else: row[11] = INVALID_RGBA return free_space def get_output_size(self): if self.component.output_stack.get_visible_child_name() == "sd_card": sd_card_id = self.component.sd_card_combobox.get_active() if sd_card_id == -1: size = -1 else: get_size_index = sd_card_info.get_size_index() size = int( self.component.sd_card_list_store[sd_card_id][get_size_index] ) else: try: size = get_adjusted_image_size( data.sdcard_sizes[self.component.size_combobox.get_active()] * ONE_GB ) except Exception: size = -1 return size def zim_language_selection_changed(self, selection): model, rows = selection.get_selected_rows() selected_languages = set() for row in rows: selected_languages.add(model[row][0]) for zim in self.component.zim_list_store: zim[10] = len(zim[5] & selected_languages) != 0 def available_zim_clicked(self, tree_view, path, column): tree_view.get_model()[path][8] = True tree_view.get_selection().unselect_all() self.update_free_space() def choosen_zim_clicked(self, tree_view, path, column): tree_view.get_model()[path][8] = False tree_view.get_selection().unselect_all() self.update_free_space() def zim_done_button_clicked(self, widget): self.component.zim_window.hide() def zim_filter_func(self, model, iter, data): return model[iter][10] and not model[iter][8] def choosen_zim_filter_func(self, model, iter, data): return model[iter][8] Application() run()
ideascube/pibox-installer
kiwix-hotspot/gui.py
Python
gpl-3.0
78,877
[ "VisIt" ]
70452939f923666d7327842dd0fbe727f91729f6447e4712f05b1a64e1bf7671
""" @created_at 2014-06-09 @author Exequiel Fuentes <efulet@gmail.com> @author Brian Keith <briankeithn@gmail.com> """ # Se recomienda seguir los siguientes estandares: # 1. Para codificacion: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/) # 2. Para documentacion: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/) class LaberintoExcepcion(Exception): """LaberintoExcepcion maneja las excepciones para la clase Laberinto Como usar esta clase: raise LaberintoExcepcion("Archivo de entrada no existe") """ def __init__(self, value): self.value = value def __str__(self): return repr(self.value)
efulet/laberinto
laberinto/lib/laberinto_excepcion.py
Python
mit
712
[ "Brian" ]
1c62e9a5daf15748956f3f6ff6fcc4af60138f1182147bb4dd4350f4025dfdca
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy


def get_naive_bayes_models():
    gnb = GaussianNB()
    mnb = MultinomialNB()
    bnb = BernoulliNB()
    classifier_list = [gnb, mnb, bnb]
    classifier_name_list = ['Gaussian NB', 'Multinomial NB', 'Bernoulli NB']
    return classifier_list, classifier_name_list


def get_neural_network(hidden_layer_size=50):
    mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
    return [mlp], ['MultiLayer Perceptron']


def get_ensemble_models():
    rf = RandomForestClassifier(n_estimators=51, min_samples_leaf=5, min_samples_split=3)
    bagg = BaggingClassifier(n_estimators=71, random_state=42)
    extra = ExtraTreesClassifier(n_estimators=57, random_state=42)
    ada = AdaBoostClassifier(n_estimators=51, random_state=42)
    grad = GradientBoostingClassifier(n_estimators=101, random_state=42)
    classifier_list = [rf, bagg, extra, ada, grad]
    classifier_name_list = ['Random Forests', 'Bagging', 'Extra Trees', 'AdaBoost', 'Gradient Boost']
    return classifier_list, classifier_name_list


def print_evaluation_metrics(trained_model, trained_model_name, X_test, y_test):
    # Python 3 print() calls (the original used Python 2 print statements)
    print('--------- For Model : ', trained_model_name)
    predicted_values = trained_model.predict(X_test)
    print(metrics.classification_report(y_test, predicted_values))
    print('Accuracy Score : ', metrics.accuracy_score(y_test, predicted_values))
    print('---------------------------------------\n')


filename = 'train.csv'
train_frame = pd.read_csv(filename)
# list(...) is needed on Python 3, where map() returns a lazy iterator
train_frame['T'] = list(map(lambda x: float(x) / 200000.0, train_frame['T'].values))
device_labels = train_frame['Device'].values
del train_frame['Device']
X_train, X_test, y_train, y_test = train_test_split(train_frame.values, device_labels, test_size=0.2, random_state=42)
classifier_list, classifier_name_list = get_ensemble_models()
for classifier, classifier_name in zip(classifier_list, classifier_name_list):
    classifier.fit(X_train, y_train)
    print_evaluation_metrics(classifier, classifier_name, X_test, y_test)
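# A hypothetical extension (not in the original script): the unused factories
# defined above plug into the same fit/evaluate loop. Note that MultinomialNB
# requires non-negative feature values, so it may fail on this data as-is.
#
#     classifier_list, classifier_name_list = get_naive_bayes_models()
#     for classifier, classifier_name in zip(classifier_list, classifier_name_list):
#         classifier.fit(X_train, y_train)
#         print_evaluation_metrics(classifier, classifier_name, X_test, y_test)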
rupakc/Kaggle-Compendium
Accelerometer Biometric Competition/accelerate-baseline.py
Python
mit
2,539
[ "Gaussian" ]
97b9bb7ae8799a83a70f9e440aad1c6913ffccac0511a8e2f50f6a88241a29a5
""" Parser function parse() to parse the .wout output file of Wannier90. """ from __future__ import print_function import inspect import re from collections import defaultdict from . import show_output # Match the lines describing the nearest-neighbour Shells # Groups: # 0: Shell Index # 1: distance (ang^-1) # 2: multiplicity near_neigh_re = re.compile("^\s*\|\s+(\d+)\s+([\d\.]+)\s*(\d+)\s*") # Match the lines describing the b_k vectors for the completeness relation # Groups: # 0: Index # 1: b_k(x) # 2: b_k(y) # 3: b_k(z) # 4: w_b completeness_re = re.compile("^\s*\|\s+(\d+)\s+([\d\.-]+)\s+([\d\.-]+)\s+([\d\.-]+)\s*([\d\.]+)\s*") # Match the 'WF centre and spread' line. # Groups: # 0: idx # 1: centre_x # 2: centre_y # 3: centre_z # 4: spread spread_re = re.compile("^\s*WF centre and spread\s+(\d+)\s+\(\s*([0-9\.-]+)\s*,\s*([0-9\.-]+)\s*,\s*([0-9\.-]+)\s*\)\s*([0-9\.-]+)\s*$") # Match the lines with the Omegas # Groups: # 0: Omega_* omegaI_re = re.compile("Omega\ I\s+=\s*([0-9\.-]+)\s*$") omegaD_re = re.compile("Omega\ D\s+=\s*([0-9\.-]+)\s*$") omegaOD_re = re.compile("Omega\ OD\s+=\s*([0-9\.-]+)\s*$") omegaTotal_re = re.compile("Omega\ Total\s+=\s*([0-9\.-]+)\s*$") omegaIOD_C_re = re.compile("Omega\ IOD_C\s+=\s*([0-9\.-]+)\s*$") omegaRest_re = re.compile("Omega\ Rest\s+=\s*([0-9\.-]+)\s*$") penaltyfunc_re = re.compile("Penalty\ func\s+=\s*([0-9\.-]+)\s*$") omegaTotal_C_re = re.compile("Omega\ Total_C\s+=\s*([0-9\.-]+)\s*$") ## A comment on regexps: re.match only checks the beginning of the line, while ## re.search anywhere in the string (like perl) def parse(fname): """ Open the file, parses it and return the values """ retdict = defaultdict(list) if show_output: print("[{}.{}] Parsing file '{}'".format( __name__, inspect.currentframe().f_code.co_name, fname)) with open(fname) as f: lines = f.readlines() for lno, l in enumerate(lines): ############################################################### # Nearest-neighbour Shells # Start from the fourth line after # 'Distance to Nearest-Neighbour Shells', # then stop at the line with ------------------ if "Distance to Nearest-Neighbour Shells" in l: for l2 in lines[lno+4:]: # Skip 4 lines match = near_neigh_re.search(l2) if not match or '--------------------------------------' in l2: break _, dist, mult = match.groups() retdict["near_neigh_dist"].append(float(dist)) retdict["near_neigh_mult"].append(int(mult)) continue ############################################################### # Completeness relation # Start from the sixth line after # 'Completeness relation is fully satisfied', # then stop at the line with ------------------ if "Completeness relation is fully satisfied" in l: for l2 in lines[lno+6:]: # Skip 6 lines match = completeness_re.search(l2) if not match or '--------------------------------------' in l2: break _, bkx, bky, bkz, bkw = match.groups() retdict["completeness_x"].append(float(bkx)) retdict["completeness_y"].append(float(bky)) retdict["completeness_z"].append(float(bkz)) retdict["completeness_weight"].append(float(bkw)) continue ############################################################### # Final state spreads and centres: get all lines after # 'Final state' that contain the spreads if "Final State" in l: for l2 in lines[lno+1:]: match = spread_re.search(l2) if not match: break _, x, y, z, s = match.groups() retdict["final_centres_x"].append(float(x)) retdict["final_centres_y"].append(float(y)) retdict["final_centres_z"].append(float(z)) retdict["final_spreads"].append(float(s)) continue 
############################################################### # various Omegas (four numbers, typically at the end) match = omegaI_re.search(l) if match: retdict["omegaI"].append(float(match.groups()[0])) continue match = omegaD_re.search(l) if match: retdict["omegaD"].append(float(match.groups()[0])) continue match = omegaOD_re.search(l) if match: retdict["omegaOD"].append(float(match.groups()[0])) continue match = omegaTotal_re.search(l) if match: retdict["omegaTotal"].append(float(match.groups()[0])) continue match = omegaIOD_C_re.search(l) if match: retdict["omegaIOD_C"].append(float(match.groups()[0])) continue match = omegaRest_re.search(l) if match: retdict["omegaRest"].append(float(match.groups()[0])) continue match = penaltyfunc_re.search(l) if match: retdict["penaltyfunc"].append(float(match.groups()[0])) continue match = omegaTotal_C_re.search(l) if match: retdict["omegaTotal_C"].append(float(match.groups()[0])) continue ############################################################### retdict = dict(retdict) if show_output: for k in sorted(retdict): print(" {}: {}".format(k, retdict[k])) print("-"*72) return retdict
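# A minimal usage sketch (hypothetical file name, not part of the original
# module): parse() returns a dict of lists keyed by the quantities matched
# above; e.g. each "Final State" block appends one value per Wannier function
# to final_spreads and one total spread to omegaTotal.
#
#     results = parse('example.wout')
#     print(results.get('omegaTotal'))
#     print(results.get('final_spreads'))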
mostofi/wannier90
test-suite/tools/parsers/parse_wout.py
Python
gpl-2.0
5,713
[ "Wannier90" ]
5f970aa0284cd27c73d8092d228c352e257e5e2437d747d693f3f328ca03bcec
#  Copyright (C) 2012,2013,2015
#      Max Planck Institute for Polymer Research
#  Copyright (C) 2008,2009,2010,2011
#      Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

r"""
******************************
**espressopp.FixedTripleList**
******************************


.. function:: espressopp.FixedTripleList(storage)

        :param storage:
        :type storage:

.. function:: espressopp.FixedTripleList.add(pid1, pid2, pid3)

        :param pid1:
        :param pid2:
        :param pid3:
        :type pid1:
        :type pid2:
        :type pid3:
        :rtype:

.. function:: espressopp.FixedTripleList.addTriples(triplelist)

        :param triplelist:
        :type triplelist:
        :rtype:

.. function:: espressopp.FixedTripleList.getTriples()

        :rtype:

.. function:: espressopp.FixedTripleList.size()

        :rtype:
"""

from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit


class FixedTripleListLocal(_espressopp.FixedTripleList):

    def __init__(self, storage):
        if pmi.workerIsActive():
            cxxinit(self, _espressopp.FixedTripleList, storage)

    def add(self, pid1, pid2, pid3):
        if pmi.workerIsActive():
            return self.cxxclass.add(self, pid1, pid2, pid3)

    def addTriples(self, triplelist):
        """
        Each processor takes the broadcasted triplelist and
        adds those triples whose first particle is owned by
        this processor.
        """
        if pmi.workerIsActive():
            for triple in triplelist:
                pid1, pid2, pid3 = triple
                self.cxxclass.add(self, pid1, pid2, pid3)

    def size(self):
        if pmi.workerIsActive():
            return self.cxxclass.size(self)

    def getTriples(self):
        if pmi.workerIsActive():
            triples = self.cxxclass.getTriples(self)
            return triples


if pmi.isController:
    class FixedTripleList(object):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.FixedTripleListLocal',
            localcall = [ "add" ],
            pmicall = [ "addTriples" ],
            pmiinvoke = ["getTriples", "size"]
        )
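# A minimal usage sketch (hypothetical, not part of the original module): it
# assumes an already configured espressopp System whose storage is passed to
# the constructor, and that the particle ids below exist.
#
#     ftl = espressopp.FixedTripleList(system.storage)
#     ftl.addTriples([(1, 2, 3), (2, 3, 4)])
#     print(ftl.size())
#     print(ftl.getTriples())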
junghans/espressopp
src/FixedTripleList.py
Python
gpl-3.0
3,234
[ "ESPResSo" ]
2b51aa5d34a8b722a08a1c91d4a176bb089745aad931dc8303dec2e9bf81c184
""" Defines the plugin to return the boolean given as param """ __RCSID__ = "$Id $" from DIRAC.Resources.Catalog.ConditionPlugins.FCConditionBasePlugin import FCConditionBasePlugin class DummyPlugin( FCConditionBasePlugin ): """ This plugin is to be used to simply return True or False """ def __init__( self, conditions ): """ the condition can be True or False: """ super( DummyPlugin, self ).__init__( conditions ) def eval( self, **kwargs ): """ evaluate whether the conditon is True or False """ return eval( self.conditions )
Andrew-McNab-UK/DIRAC
Resources/Catalog/ConditionPlugins/DummyPlugin.py
Python
gpl-3.0
577
[ "DIRAC" ]
8fdf581ca26c5fd90ed34a3a1d827fa80c3b54effe0c4c5f14e333b49509b20f
#!/usr/bin/env python
# -*- encoding:utf-8 -*-

## ref https://secure.flickr.com/services/api/misc.urls.html
## ref https://secure.flickr.com/services/api/flickr.photos.search.html
## ref https://secure.flickr.com/services/api/explore/flickr.photos.search

import json
import os
import urllib.request

import requests
from requests.exceptions import ConnectionError


def get_img(query, path, img_num):
    """Download full size images from Flickr image search.

    Don't print or republish images without permission.
    I used this to train a learning algorithm.
    """
    api_key = 'ba12e9d62ea20cf4a6c6aab4baa0c2f1'
    # If you hit an api_key error, visit
    # https://secure.flickr.com/services/api/explore/flickr.photos.search,
    # push the "Call method" button, and search for api_key in the reply page.
    BASE_URL = ('https://secure.flickr.com/services/rest/'
                '?method=flickr.photos.search&format=json&api_key=' + api_key +
                '&text=' + query.replace(' ', '+') + '&per_page=' + str(img_num))
    BASE_PATH = os.path.join(path, query)

    if not os.path.exists(BASE_PATH):
        os.makedirs(BASE_PATH)

    r = requests.get(BASE_URL)
    title = 1
    # The API wraps the JSON payload in "jsonFlickrApi(...)", hence the slicing.
    for image_info in json.loads(r.text[14:-1])['photos']['photo']:
        file = os.path.join(BASE_PATH, '%s.jpg') % str(title)
        url = ('http://farm' + str(image_info['farm']) + '.staticflickr.com/' +
               str(image_info['server']) + '/' + image_info['id'] + '_' +
               image_info['secret'] + '_b.jpg')
        try:
            print('Fetching %s ' % url)
            urllib.request.urlretrieve(url, file)
            # os.system('aria2c "' + url + '" -o ' + file)
            # os.system('wget "' + url + '" -O ' + file)
            print('save as %s' % file)
        except ConnectionError:
            print('could not download %s' % url)
            continue
        # else:
        #     print('download %s is incomplete' % url)
        #     continue
        title += 1

# get_img('query', 'Directory', 30)

# The URL takes the following format:
#
# http://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}.jpg
#   or
# http://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}_[mstzb].jpg
#   or
# http://farm{farm-id}.staticflickr.com/{server-id}/{id}_{o-secret}_o.(jpg|gif|png)
#
# The letter suffixes are as follows:
# s    small square 75x75
# q    large square 150x150
# t    thumbnail, 100 on longest side
# m    small, 240 on longest side
# n    small, 320 on longest side
# -    medium, 500 on longest side
# z    medium 640, 640 on longest side
# c    medium 800, 800 on longest side†
# b    large, 1024 on longest side*
# o    original image, either a jpg, gif or png, depending on source format
#
# Example
#
# http://farm1.staticflickr.com/2/1418878_1e92283336_m.jpg
#
# farm-id: 1
# server-id: 2
# photo-id: 1418878
# secret: 1e92283336
# size: m
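# A hypothetical helper (not in the original script): build a static photo URL
# from a flickr.photos.search result, following the URL format documented in
# the comments above ('b' = large, 1024 on the longest side).
#
#     def build_photo_url(info, size='b'):
#         return 'http://farm%s.staticflickr.com/%s/%s_%s_%s.jpg' % (
#             info['farm'], info['server'], info['id'], info['secret'], size)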
gaofeihifly/fetch_image
fkr.py
Python
gpl-3.0
2,815
[ "VisIt" ]
480b41fb1aea697d290df7890207b8c076df784f39424d8dfcdcc6c2d01ffd77
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# ------------------------------------------------------------------------------
#  Copyright (C) 2006-2009 University of Dundee. All rights reserved.
#
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# ------------------------------------------------------------------------------

###
#
# ROIDrawingCanvas draws the shapes from the object visited.
# These map to the ROI types in omero.model.*
#
# @author Jean-Marie Burel &nbsp;&nbsp;&nbsp;&nbsp;
# <a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
# @author Donald MacDonald &nbsp;&nbsp;&nbsp;&nbsp;
# <a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk
# </a>
# @version 3.0
# <small>
# (<b>Internal version:</b> $Revision: $Date: $)
# </small>
# @since 3.0-Beta4
#

"""
Example code to draw 10 ellipses randomly on an image::

    import ROI_utils;
    import ROIDrawingUtils;
    from random import randint;
    l = [];
    for i in range(0,10):
        e = ROI_utils.EllipseData(
            ROI_utils.ROICoordinate(), randint(100, 300), randint(100, 300),
            randint(20, 50), randint(20, 50))
        l.append(e);
    d = ROIDrawingUtils.DrawingCanvas();
    d.createImage(400,400)
    v = d.drawElements(l);
    d.image.show()

Example code to draw a polyline on an image and display it in PIL::

    try:
        from PIL import Image, ImageDraw  # see ticket:2597
    except ImportError:
        import Image, ImageDraw  # see ticket:2597
    import ROI_utils
    import ROIDrawingUtils
    drawingCanvas = ROIDrawingUtils.DrawingCanvas();
    points = [10,30, 40, 80, 100, 150]
    polygonData = ROI_utils.PolylineData(ROI_utils.ROICoordinate(), points);
    drawingCanvas.createImage(400,400);
    drawingCanvas.drawElements([polygonData]);
    drawingCanvas.image.show()
"""

try:
    from PIL import Image, ImageDraw  # see ticket:2597
except ImportError:
    import Image
    import ImageDraw  # see ticket:2597


##
# Drawing canvas allows the creation of shapes on an
# image using PIL; the class can be supplied with an
# image and will write on that, or can create an image.
# The object will also visit a list of objects supplied
# and draw their respective shapes if they accept the
# DrawingCanvas visitor.
#
class DrawingCanvas:

    ##
    # Create the default object.
    #
    def __init__(self):
        self.width = 0
        self.height = 0
        self.image = None
        self.draw = None

    ##
    # Create a new image to draw on with width, height
    # and background colour (0,0,0,0)
    # @param width See above.
    # @param height See above.
    def createImage(self, width, height):
        self.image = Image.new('RGBA', (width, height), (0, 0, 0, 0))
        self.width = width
        self.height = height

    ##
    # Set the image to draw on as image which has width, height.
    # @param image The image to draw on.
    # @param width See above.
    # @param height See above.
    def setImage(self, image, width, height):
        self.image = image
        self.width = width
        self.height = height

    ##
    # Visit all the elements in the element list and draw their shapes.
    # @param elementList See above.
    def drawElements(self, elementList):
        if(self.draw is None):
            self.draw = ImageDraw.Draw(self.image)
        for element in elementList:
            element.acceptVisitor(self)
        return self.image

    ##
    # Get the fill colour from the ShapeSettings object from its tuple.
    # @param shapeSetting See above.
    #
    def getFillColour(self, shapeSettings):
        return shapeSettings[1][0]

    ##
    # Get the stroke colour from the ShapeSettings object from its tuple.
    # @param shapeSetting See above.
    #
    def getStrokeColour(self, shapeSettings):
        return shapeSettings[0][0]

    ##
    # Get the stroke width from the ShapeSettings object from its tuple.
    # @param shapeSetting See above.
    #
    def getStrokeWidth(self, shapeSettings):
        return shapeSettings[0][1]

    ##
    # Draw an ellipse at (cx, cy) with major and minor axis (rx,ry).
    # @param cx See above.
    # @param cy See above.
    # @param rx See above.
    # @param ry See above.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawEllipse(self, cx, cy, rx, ry, shapeSettings,
                    affineTransform=None):
        x = cx - rx
        y = cy - ry
        w = x + rx * 2
        h = y + ry * 2
        fillColour = self.getFillColour(shapeSettings)
        strokeColour = self.getStrokeColour(shapeSettings)
        self.draw.ellipse((x, y, w, h), fill=fillColour, outline=strokeColour)

    ##
    # Draw a rectangle at (x, y) with width, height (width, height).
    # @param x See above.
    # @param y See above.
    # @param width See above.
    # @param height See above.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawRectangle(self, x, y, w, h, shapeSettings, affineTransform=None):
        fillColour = self.getFillColour(shapeSettings)
        strokeColour = self.getStrokeColour(shapeSettings)
        if(affineTransform is None):
            self.draw.rectangle(
                (x, y, w, h), fill=fillColour, outline=strokeColour)
        else:
            im = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            newDraw = ImageDraw.Draw(im)
            newDraw.rectangle(
                (x, y, w, h), fill=fillColour, outline=strokeColour)
            newImage = im.transform(
                (self.width, self.height), Image.AFFINE, affineTransform)
            self.image.paste(newImage)

    ##
    # Draw a polygon with the points in pointTupleList
    # which are [(x1, y1), (x2, y2)...].
    # @param pointTupleList See above.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawPolygon(self, pointTupleList, shapeSettings,
                    affineTransform=None):
        fillColour = self.getFillColour(shapeSettings)
        strokeColour = self.getStrokeColour(shapeSettings)
        if(affineTransform is None):
            self.draw.polygon(
                pointTupleList, fill=fillColour, outline=strokeColour)
        else:
            im = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            # Draw onto the temporary image so the affine transform is
            # applied to this shape only.
            newDraw = ImageDraw.Draw(im)
            newDraw.polygon(
                pointTupleList, fill=fillColour, outline=strokeColour)
            newImage = im.transform(
                (self.width, self.height), Image.AFFINE, affineTransform)
            self.image.paste(newImage)

    ##
    # Draw a line from (x1, y1) to (x2,y2).
    # @param x1 See above.
    # @param y1 See above.
    # @param x2 See above.
    # @param y2 See above.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawLine(self, x1, y1, x2, y2, shapeSettings, affineTransform=None):
        strokeColour = self.getStrokeColour(shapeSettings)
        strokeWidth = self.getStrokeWidth(shapeSettings)
        if(affineTransform is None):
            self.draw.line(
                [(x1, y1), (x2, y2)], fill=strokeColour, width=strokeWidth)
        else:
            im = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            # Draw onto the temporary image so the affine transform is
            # applied to this shape only.
            newDraw = ImageDraw.Draw(im)
            newDraw.line(
                [(x1, y1), (x2, y2)], fill=strokeColour, width=strokeWidth)
            newImage = im.transform(
                (self.width, self.height), Image.AFFINE, affineTransform)
            self.image.paste(newImage)

    ##
    # Draw a polyline with the points in pointTupleList
    # which are [(x1, y1), (x2, y2)...].
    # @param pointTupleList See above.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawPolyline(self, pointTupleList, shapeSettings,
                     affineTransform=None):
        fillColour = self.getFillColour(shapeSettings)
        strokeColour = self.getStrokeColour(shapeSettings)
        strokeWidth = self.getStrokeWidth(shapeSettings)
        if(affineTransform is None):
            self.draw.line(pointTupleList, fill=fillColour, width=strokeWidth)
        else:
            im = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            # Draw onto the temporary image; width must be the integer
            # stroke width.
            newDraw = ImageDraw.Draw(im)
            newDraw.line(
                pointTupleList, fill=strokeColour, width=strokeWidth)
            newImage = im.transform(
                (self.width, self.height), Image.AFFINE, affineTransform)
            self.image.paste(newImage)

    ##
    # Draw a mask at (x, y) with (width, height).
    # @param x See above.
    # @param y See above.
    # @param width See above.
    # @param height See above.
    # @param bytes The mask in bytes.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawMask(self, x, y, width, height, bytes, shapeSettings,
                 affineTransform=None):
        fillColour = self.getFillColour(shapeSettings)
        mask = Image.fromstring('1', (width, height), bytes)
        if(affineTransform is None):
            # ImageDraw.bitmap takes the position as a single xy tuple.
            self.draw.bitmap((x, y), mask, fill=fillColour)
        else:
            im = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            newDraw = ImageDraw.Draw(im)
            newDraw.bitmap((x, y), mask, fill=fillColour)
            newImage = im.transform(
                (self.width, self.height), Image.AFFINE, affineTransform)
            self.image.paste(newImage)

    ##
    # Draw text at (x, y).
    # @param x See above.
    # @param y See above.
    # @param text The text to draw.
    # @param shapeSettings The shapes display properties(colour,etc).
    # @param affineTransform The affine transform that the shape has to
    # undergo before drawing.
    def drawText(self, x, y, text, shapeSettings, affineTransform=None):
        textColour = self.getStrokeColour(shapeSettings)
        if(affineTransform is None):
            self.draw.text((x, y), text, fill=textColour)
        else:
            im = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))
            newDraw = ImageDraw.Draw(im)
            newDraw.text((x, y), text, fill=textColour)
            newImage = im.transform(
                (self.width, self.height), Image.AFFINE, affineTransform)
            self.image.paste(newImage)
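# A minimal usage sketch (hypothetical colour values, not part of the original
# module), mirroring the docstring examples. shapeSettings is the nested tuple
# read by getStrokeColour/getStrokeWidth/getFillColour above:
# ((strokeColour, strokeWidth), (fillColour,)).
#
#     canvas = DrawingCanvas()
#     canvas.createImage(200, 200)
#     settings = (((255, 0, 0, 255), 2), ((0, 0, 255, 128),))
#     canvas.drawRectangle(20, 20, 180, 180, settings)
#     canvas.image.show()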
dominikl/openmicroscopy
components/tools/OmeroPy/src/omero/util/ROIDrawingUtils.py
Python
gpl-2.0
11,753
[ "VisIt" ]
a422d06c6f04a0f8d7a70104443d861508d1cb12cfe449002a5efd04891bc74c
""" This module contains an interface to I/O using the python binding of the OpenBabel library ( http://openbabel.org/ ), supporting over 90 different chemical file formats. In order to avoid a hard dependency on yet another library, the idea is to use this module as a fallback in case none of the builtin methods work. The OpenBabel Python interface is described in O'Boyle et al., Chem. Cent. J., 2, 5 (2008), doi:10.1186/1752-153X-2-5 This module has been contributed by Janne Blomqvist """ from ase.atoms import Atoms, Atom def guess_format(filename, read=True): """Babel specific file format guesser. filename: str Name of file to guess format of. read: bool Are we trying to read; if False we are writing """ if filename.endswith('.gz'): filename = filename[:-3] elif filename.endswith('.bz2'): filename = filename[:-4] lastdot = filename.rfind('.') if lastdot != -1: import pybel ext = filename[(lastdot+1):] if read: if ext in pybel.informats: return ext else: if ext in pybel.outformats: return ext return None def read_babel(filename, format=None, index=-1): """Read a file containing one or more images using OpenBabel Returns the image given by the index argument. Doesn't try to get unit cell, pbc, constraint or such, only the atomic symbols and coordinates. Also, chemists and molecular biologists have the weird idea to overload chemical symbol names with charge states and whatever. However, ASE doesn't understand this, e.g. that the chemical symbol 'Me1' might mean the CH3 group in a methanol molecule, and hence symbol names might be messed up. """ import pybel if format == None or format.lower() == 'babel': format = guess_format(filename, True) images = [] for mol in pybel.readfile(format, filename): atoms = [] for atom in mol.atoms: atoms.append(Atom(atom.atomicnum, atom.coords)) images.append(Atoms(atoms)) return images[index] def write_babel(filename, images, format=None): """Write a set of images with OpenBabel Similar to the read_babel function, only cares about atomic symbols and coordinates. """ import pybel import openbabel as ob if not isinstance(images, (list, tuple)): images = [images] if format == None: format = guess_format(filename, False) outfile = pybel.Outputfile(format, filename, overwrite=True) for image in images: # image is an ase.Atoms object mol = ob.OBMol() for atom in image: a = mol.NewAtom() a.SetAtomicNum(atom.number) c = atom.position a.SetVector(c[0], c[1], c[2]) pmol = pybel.Molecule(mol) outfile.write(pmol) outfile.close() if __name__ == '__main__': import sys a = read_babel(sys.argv[1]) write_babel(sys.argv[2], a)
freephys/python_ase
ase/io/babel.py
Python
gpl-3.0
3,031
[ "ASE", "Pybel" ]
08dbaf9b19b51d65c1e8a8996c2097e6545d23e9b2a2dc90730495d40216ea45
# # Copyright (C) 2007, Mark Lee # #http://rl-glue-ext.googlecode.com/ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # $Revision: 592 $ # $Date: 2009-02-05 00:24:59 +0100 (Thu, 05 Feb 2009) $ # $Author: brian@tannerpages.com $ # $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/network/Network.py $ # #The Network class is defined in here # import socket import struct import array import time import sys import numpy from io import BytesIO try: import numpy numpy_int_type = numpy.dtype('int32').newbyteorder('>') numpy_float_type = numpy.dtype('float64').newbyteorder('>') numpy_char_type = 'S1'#numpy.dtype('uint8').newbyteorder('>') except: pass from rlglue.types import Action from rlglue.types import Observation from rlglue.types import Reward_observation_terminal from rlglue.types import RL_Abstract_Type # RL-Glue needs to know what type of object is trying to connect. kExperimentConnection = 1 kAgentConnection = 2 kEnvironmentConnection = 3 kAgentInit = 4 # agent_* start by sending one of these values kAgentStart = 5 # to the client to let it know what type of kAgentStep = 6 # event to respond to kAgentEnd = 7 kAgentCleanup = 8 kAgentMessage = 10 kEnvInit = 11 kEnvStart = 12 kEnvStep = 13 kEnvCleanup = 14 kEnvMessage = 19 kRLInit = 20 kRLStart = 21 kRLStep = 22 kRLCleanup = 23 kRLReturn = 24 kRLNumSteps = 25 kRLNumEpisodes = 26 kRLEpisode = 27 kRLAgentMessage = 33 kRLEnvMessage = 34 kRLTerm = 35 kLocalHost = "127.0.0.1" kDefaultPort = 4096 kRetryTimeout = 2 kDefaultBufferSize = 4096 kIntSize = 4 kDoubleSize = 8 kCharSize = 1 kUnknownMessage = "Unknown Message: %s\n" class Network: def __init__(self): self.sock = None self.recvBuffer = BytesIO() self.sendBuffer = BytesIO() if 'numpy' in globals(): self.getAbstractType = self.getAbstractType_numpy else: self.getAbstractType = self.getAbstractType_list def connect(self, host=kLocalHost, port=kDefaultPort, retryTimeout=kRetryTimeout): while self.sock == None: try: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.sock.connect((host, port)) except socket.error as msg: self.sock = None time.sleep(retryTimeout) else: break def close(self): self.sock.close() def send(self): self.sock.sendall(self.sendBuffer.getvalue()) def recv(self,size): s = b'' while len(s) < size: s += self.sock.recv(size - len(s)) self.recvBuffer.write(s) self.recvBuffer.seek(0) return len(s) def clearSendBuffer(self): self.sendBuffer.close() self.sendBuffer = BytesIO() def clearRecvBuffer(self): self.recvBuffer.close() self.recvBuffer = BytesIO() def flipSendBuffer(self): self.clearSendBuffer() def flipRecvBuffer(self): self.clearRecvBuffer() def getInt(self): s = self.recvBuffer.read(kIntSize) return struct.unpack("!i",s)[0] def getDouble(self): s = self.recvBuffer.read(kDoubleSize) return struct.unpack("!d",s)[0] def getString(self): #If you read 0 you get "" not None so that's fine length = self.getInt() return self.recvBuffer.read(length) def 
getAbstractType_list(self): numInts = self.getInt() numDoubles = self.getInt() numChars = self.getInt() returnStruct=RL_Abstract_Type() if numInts > 0: s = self.recvBuffer.read(numInts*kIntSize) returnStruct.intArray = list(struct.unpack("!%di" % (numInts),s)) if numDoubles > 0: s = self.recvBuffer.read(numDoubles*kDoubleSize) returnStruct.doubleArray = list(struct.unpack("!%dd" % (numDoubles),s)) if numChars > 0: s = self.recvBuffer.read(numChars*kCharSize) returnStruct.charArray = list(struct.unpack("!%dc" % (numChars),s)) return returnStruct def getAbstractType_numpy(self): numInts = self.getInt() numDoubles = self.getInt() numChars = self.getInt() returnStruct=RL_Abstract_Type() if numInts > 0: s = self.recvBuffer.read(numInts*kIntSize) assert kIntSize == 4 returnStruct.intArray = numpy.frombuffer(s, dtype=numpy_int_type, count=numInts) if numDoubles > 0: s = self.recvBuffer.read(numDoubles*kDoubleSize) returnStruct.doubleArray = numpy.frombuffer(s, count=numDoubles, dtype=numpy_float_type) if numChars > 0: s = self.recvBuffer.read(numChars*kCharSize) returnStruct.charArray = numpy.frombuffer(s, count=numChars, dtype=numpy_char_type) return returnStruct def getObservation(self): return Observation.fromAbstractType(self.getAbstractType()) def getAction(self): return Action.fromAbstractType(self.getAbstractType()) def putInt(self,value): self.sendBuffer.write(struct.pack("!i",value)) def putDouble(self,value): self.sendBuffer.write(struct.pack("!d",value)) def putString(self,value): if value == None: value = b'' if type(value) is str: value = bytes(value, encoding='utf-8') else: value = bytes(value) self.putInt(len(value)) self.sendBuffer.write(value) def putObservation(self,obs): self.putAbstractType(obs) def putAction(self,action): self.putAbstractType(action) def putAbstractType(self, theItem): self.putInt(len(theItem.intArray)) self.putInt(len(theItem.doubleArray)) self.putInt(len(theItem.charArray)) if len(theItem.intArray) > 0: self.sendBuffer.write(struct.pack("!%di" % (len(theItem.intArray)),*(theItem.intArray))) if len(theItem.doubleArray) > 0: self.sendBuffer.write(struct.pack("!%dd" % (len(theItem.doubleArray)),*(theItem.doubleArray))) if len(theItem.charArray) > 0: cs = theItem.charArray if type(cs[0]) is str: bts = [bytes(c, encoding='utf-8') for c in cs] elif type(cs[0]) is numpy.bytes_: bts = [bytes(c) for c in cs] else: bts = cs self.sendBuffer.write(struct.pack("!%dc" % len(bts), *bts)) def putRewardObservation(self,rewardObservation): self.putInt(rewardObservation.terminal); self.putDouble(rewardObservation.r); self.putObservation(rewardObservation.o); def sizeOfAbstractType(self, theItem): size = kIntSize * 3 intSize = 0 doubleSize = 0 charSize = 0 if theItem != None: if theItem.intArray is not None: intSize = kIntSize * len(theItem.intArray) if theItem.doubleArray is not None: doubleSize = kDoubleSize * len(theItem.doubleArray) if theItem.charArray is not None: charSize = kCharSize * len(theItem.charArray) return size + intSize + doubleSize + charSize def sizeOfAction(self,action): return self.sizeOfAbstractType(action) def sizeOfObservation(self,observation): return self.sizeOfAbstractType(observation) def sizeOfRewardObservation(self,reward_observation): return kIntSize + kDoubleSize + self.sizeOfObservation(reward_observation.o)
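# A minimal, hypothetical connection sketch (not part of the original module).
# It only exercises the buffer helpers defined above; the real RL-Glue framing
# (message type, payload size, payload) is handled by the codec layers that use
# this class, so the message below is illustrative only.
#
#     net = Network()
#     net.connect(kLocalHost, kDefaultPort)
#     net.clearSendBuffer()
#     net.putInt(kAgentMessage)        # an example message type constant
#     net.putString('hello')           # 4-byte length prefix plus payload
#     net.send()
#     net.close()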
steckdenis/rlglue-py3
rlglue/network/Network.py
Python
apache-2.0
9,981
[ "Brian" ]
40fae74c6f2e8bea1aaec063a769ebee5155c99db5465402baf1cc60a911bf9b
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
Predict volumes of crystal structures.
"""

import os
import warnings

import numpy as np
from monty.serialization import loadfn

from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core import Structure

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
bond_params = loadfn(os.path.join(MODULE_DIR, "DLS_bond_params.yaml"))


def _is_ox(structure):
    comp = structure.composition
    for k in comp.keys():
        try:
            k.oxi_state
        except AttributeError:
            return False
    return True


class RLSVolumePredictor:
    """
    Reference lattice scaling (RLS) scheme that predicts the volume of a
    structure based on a known crystal structure.
    """

    def __init__(self, check_isostructural=True, radii_type="ionic-atomic", use_bv=True):
        """
        Args:
            check_isostructural: Whether to test that the two structures are
                isostructural. This algo works best for isostructural compounds.
                Defaults to True.
            radii_type (str): Types of radii to use. You can specify "ionic"
                (only uses ionic radii), "atomic" (only uses atomic radii) or
                "ionic-atomic" (uses either ionic or atomic radii, with a
                preference for ionic where possible).
            use_bv (bool): Whether to use BVAnalyzer to determine oxidation
                states if not present.
        """
        self.check_isostructural = check_isostructural
        self.radii_type = radii_type
        self.use_bv = use_bv

    def predict(self, structure, ref_structure):
        """
        Given a structure, returns the predicted volume.

        Args:
            structure (Structure): structure w/unknown volume
            ref_structure (Structure): A reference structure with a similar
                structure but different species.

        Returns:
            a float value of the predicted volume
        """
        if self.check_isostructural:
            m = StructureMatcher()
            mapping = m.get_best_electronegativity_anonymous_mapping(structure, ref_structure)
            if mapping is None:
                raise ValueError("Input structures do not match!")

        if "ionic" in self.radii_type:
            try:
                # Use BV analyzer to determine oxidation states only if the
                # oxidation states are not already specified in the structure
                # and use_bv is true.
                if (not _is_ox(structure)) and self.use_bv:
                    a = BVAnalyzer()
                    structure = a.get_oxi_state_decorated_structure(structure)
                if (not _is_ox(ref_structure)) and self.use_bv:
                    a = BVAnalyzer()
                    ref_structure = a.get_oxi_state_decorated_structure(ref_structure)

                comp = structure.composition
                ref_comp = ref_structure.composition

                # Check if all the associated ionic radii are available.
                if any(k.ionic_radius is None for k in list(comp.keys())) or any(
                    k.ionic_radius is None for k in list(ref_comp.keys())
                ):
                    raise ValueError("Not all the ionic radii are available!")

                numerator = 0
                denominator = 0
                # Here, the 1/3 factor on the composition accounts for atomic
                # packing. We want the number per unit length.
                for k, v in comp.items():
                    numerator += k.ionic_radius * v ** (1 / 3)
                for k, v in ref_comp.items():
                    denominator += k.ionic_radius * v ** (1 / 3)

                return ref_structure.volume * (numerator / denominator) ** 3
            except Exception:
                warnings.warn("Exception occurred. Will attempt atomic radii.")
                # If error occurs during use of ionic radii scheme, pass
                # and see if we can resolve it using atomic radii.
                pass

        if "atomic" in self.radii_type:
            comp = structure.composition
            ref_comp = ref_structure.composition
            # Here, the 1/3 factor on the composition accounts for atomic
            # packing. We want the number per unit length.
            numerator = 0
            denominator = 0
            for k, v in comp.items():
                numerator += k.atomic_radius * v ** (1 / 3)
            for k, v in ref_comp.items():
                denominator += k.atomic_radius * v ** (1 / 3)
            return ref_structure.volume * (numerator / denominator) ** 3

        raise ValueError("Cannot find volume scaling based on radii choices specified!")

    def get_predicted_structure(self, structure, ref_structure):
        """
        Given a structure, returns back the structure scaled to predicted
        volume.

        Args:
            structure (Structure): structure w/unknown volume
            ref_structure (Structure): A reference structure with a similar
                structure but different species.

        Returns:
            a Structure object with predicted volume
        """
        new_structure = structure.copy()
        new_structure.scale_lattice(self.predict(structure, ref_structure))

        return new_structure


class DLSVolumePredictor:
    """
    Data-mined lattice scaling (DLS) scheme that relies on data-mined bond
    lengths to predict the crystal volume of a given structure.

    As of 2/12/19, we suggest this method be used in conjunction with
    min_scaling and max_scaling to prevent instances of very large, unphysical
    predicted volumes found in a small subset of structures.
    """

    def __init__(self, cutoff=4.0, min_scaling=0.5, max_scaling=1.5):
        """
        Args:
            cutoff (float): cutoff radius added to site radius for finding
                site pairs. Necessary to increase only if your initial
                structure guess is extremely bad (atoms way too far apart). In
                all other instances, increasing cutoff gives same answer but
                takes more time.
            min_scaling (float): if not None, this will ensure that the new
                volume is at least this fraction of the original (preventing
                too-small volumes)
            max_scaling (float): if not None, this will ensure that the new
                volume is at most this fraction of the original (preventing
                too-large volumes)
        """
        self.cutoff = cutoff
        self.min_scaling = min_scaling
        self.max_scaling = max_scaling

    def predict(self, structure, icsd_vol=False):
        """
        Given a structure, returns the predicted volume.

        Args:
            structure (Structure) : a crystal structure with an unknown volume.
            icsd_vol (bool) : True if the input structure's volume comes from
                ICSD.

        Returns:
            a float value of the predicted volume.
        """

        # Get standard deviation of electronegativity in the structure.
        std_x = np.std([site.specie.X for site in structure])
        # Sites that have atomic radii
        sub_sites = []
        # Record the "DLS estimated radius" from bond_params.
        bp_dict = {}

        for sp in list(structure.composition.keys()):
            if sp.atomic_radius:
                sub_sites.extend([site for site in structure if site.specie == sp])
            else:
                warnings.warn(f"VolumePredictor: no atomic radius data for {sp}")

            if sp.symbol not in bond_params:
                warnings.warn(f"VolumePredictor: bond parameters not found, used atomic radii for {sp}")
            else:
                r, k = bond_params[sp.symbol]["r"], bond_params[sp.symbol]["k"]
                bp_dict[sp] = float(r) + float(k) * std_x

        # Structure object that includes only sites with known atomic radii.
reduced_structure = Structure.from_sites(sub_sites) smallest_ratio = None for site1 in reduced_structure: sp1 = site1.specie neighbors = reduced_structure.get_neighbors(site1, sp1.atomic_radius + self.cutoff) for nn in neighbors: sp2 = nn.specie if sp1 in bp_dict and sp2 in bp_dict: expected_dist = bp_dict[sp1] + bp_dict[sp2] else: expected_dist = sp1.atomic_radius + sp2.atomic_radius if not smallest_ratio or nn.nn_distance / expected_dist < smallest_ratio: smallest_ratio = nn.nn_distance / expected_dist if not smallest_ratio: raise ValueError("Could not find any bonds within the given cutoff in this structure.") volume_factor = (1 / smallest_ratio) ** 3 # icsd volume fudge factor if icsd_vol: volume_factor *= 1.05 if self.min_scaling: volume_factor = max(self.min_scaling, volume_factor) if self.max_scaling: volume_factor = min(self.max_scaling, volume_factor) return structure.volume * volume_factor def get_predicted_structure(self, structure, icsd_vol=False): """ Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume Returns: a Structure object with predicted volume """ new_structure = structure.copy() new_structure.scale_lattice(self.predict(structure, icsd_vol=icsd_vol)) return new_structure
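# A minimal usage sketch (hypothetical input file, not part of the original
# module): scale a structure with the data-mined (DLS) scheme described in the
# class docstring above.
#
#     from pymatgen.core import Structure
#     s = Structure.from_file("POSCAR")        # placeholder structure file
#     predictor = DLSVolumePredictor()
#     print(predictor.predict(s))              # predicted volume
#     scaled = predictor.get_predicted_structure(s)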
vorwerkc/pymatgen
pymatgen/analysis/structure_prediction/volume_predictor.py
Python
mit
9,808
[ "CRYSTAL", "pymatgen" ]
e889e3bc0e707292f69c4f73bd850c514c01c123a2f4846f5e16bd3ba00688e6
""" # Copyright (C) 2007 Nathan Ramella (nar@remix.net) # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # For questions regarding this module contact # Nathan Ramella <nar@remix.net> or visit http://www.liveapi.org RemixNet Module This module contains four classes that have been assembled to facilitate remote control of Ableton Live. It's been an interesting experience learning Python and has given me a lot of time to think about music and networking protocols. I used OSC as it's somewhat of an accepted protocol and at least more flexible than MIDI. It's not the quickest protocol in terms of pure ops, but it gets the job done. For most uses all you'll need to do is create an OSCServer object, it in turn creates an OSCClient and registers a couple default callbacks for you to test with. Both OSCClient and OSCServer create their own UDP sockets this is settable on initialization and during runtime if you wish to change them. Any input or feedback on this code will always be appreciated and I look forward to seeing what will come next. -Nathan Ramella (nar@remix.net) -Updated 29/04/09 by ST8 (st8@q3f.org) Works on Mac OSX with Live7/8 The socket module is missing on osx and including it from the default python install doesnt work. Turns out its the os module that causes all the problems, removing dependance on this module and packaging the script with a modified version of the socket module allows it to run on osx. """ import inspect import os import sys import Live # Import correct paths for os / version version = Live.Application.get_application().get_major_version() if sys.platform == "win32": import socket else: if version > 7: # 10.5 try: file = open("/usr/lib/python2.5/string.pyc") except IOError: sys.path.append("/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5") import socket_live8 as socket else: sys.path.append("/usr/lib/python2.5") import socket # OSC from OSCMessage import OSCMessage from CallbackManager import CallbackManager from OSCUtils import * class UDPClient: """ This is a fairly brain-dead UDPClient implementation that is used by the OSCClient to send packets out. You shouldn't need this unless you want to get tricky or make a linewire protocol. """ def __init__(self, dst=None, dstPort=None): """ When the OSCClient instantiates its UDPClient it passes along: - dst: The destination host. If none only send to localhost. - dstPort: The destination port. If none, 9001 by default. """ if dst: self.dst = dst else: # If you'd like to try broadcast, # set this to <broadcast> # I've been unable to get it to work. self.dst = 'localhost' if dstPort: self.dstPort = dstPort else: self.dstPort = 9001 def setDstPort(self, dstPort=None): """ If the port gets reset midstream, close down our UDPSock and reopen to be sure. A little redundant. 
""" # Manually set the port before init if not dstPort: return self.DstPort = DstPort if self.UDPSock: self.UDPSock.close() self.open() def setDst(self, dst=None): """ If the dst gets reset midstream, we close down our UDPSock and reopen. A little redundant. """ if not dst: return self.dst = dst if self.UDPSock: self.UDPSock.close() self.open() def open(self): """ Open our UDPSock for listening, sets self.UDPSock """ if not self.dst: return if not self.dstPort: return # Open up our socket, we're ready for business! self.addr = (self.dst,self.dstPort) self.UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) #Broadcast doesn't work for answering callbacks for some reason. #But, I'll leave this here if you'd like to try. #if self.dst == '<broadcast>': # self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) def send(self, data): """ If we have data to send, send it, otherwise return. """ # Only send if we have data. if not data == '': self.UDPSock.sendto(data,self.addr) data = '' def close(self): """ Close our UDPSock """ # Closing time! self.UDPSock.close()
shouldmakemusic/yaas
LiveOSC/UDPClient.py
Python
gpl-2.0
5,672
[ "VisIt" ]
bf9947e80ce51f8d411a712e0e91cc0bc7efe3f858e2617544e40eaff5db4513
# -*- coding: utf-8 -*-
import datetime
import traceback

from south.db import db
from south.v2 import DataMigration
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction

import re

from catmaid.control.common import get_relation_to_id_map, get_class_to_id_map
from catmaid.control.tracing import check_tracing_setup_detailed, setup_tracing


def log(msg, indent=0):
    """ Provides basic log output. """
    print("[Annotations] %s%s" % ("  " * indent, msg))


class Traverser():
    """ This class is able to migrate a single project. This translation is
    based on the following rules (class instance and node will be used
    interchangeably):

    1. Root: Only paths that start at the node 'root' will be looked at. The
       root node itself will not become an annotation.
    2. Empty folder nodes will not be migrated.
    3. IST: Neurons within the 'Isolated synaptic terminals' folder don't get
       any special annotation.
    4. Fragments: Group nodes on any level below the fragments node will
       become individual annotations. The fragments node itself won't become
       an annotation and all annotations collected before will be forgotten.
    5. Staging: The staging folder and the user folders below it won't become
       an annotation. However, everything below the user folders is subject
       to all rules again.
    6. All other folders: They become individual annotations.
    7. Neuron names: If they don't contain a semi-colon (';'), names stay as
       they are. If they do, everything before the first semi-colon will be
       the neuron name, every semi-colon enclosed token afterwards will
       become an annotation.
    8. Permissions: owner of the link of new annotations to neurons will be
       the owner of the part_of node-node link.
    """
    def __init__(self, orm, project, class_map, relation_map):
        self.orm = orm
        self.p = project
        self.class_map = class_map
        self.relation_map = relation_map
        # Have a cache for annotation IDs
        self.annotation_cache = {}
        # Have a set of pre-defined patterns to match folder names
        self.fragments_pattern = re.compile(r'^[Ff]ragments?')
        # Keep track of all visited nodes in case they appear multiple times
        # in different branches.
        self.visited_nodes = set()

    def run(self, start_node):
        """ Starts the traversal from the given node and provides some extra
        output.
        """
        log("Starting migration of project #%s" % self.p.id)
        self.traverse(start_node, False)
        # Give some output about the used annotations
        log("Done with traversing object tree. The following %s annotations " \
                "have been found and used:" % len(self.annotation_cache), 1)
        log(', '.join(self.annotation_cache.keys()), 2)

    def annotate_neuron(self, neuron, depth, annotations):
        """ This method will parse the neuron name for annotations and will
        add them along with the extra annotations passed. The neuron name
        will be split on semi-colons (';') and all tokens, starting from the
        second (!), will be trimmed and added as annotations (if not empty).
        """
        tokens = neuron.name.split(';')
        cleaned_tokens = [t.strip() for t in tokens[1:] if t.strip()]
        annotations = annotations.union(set(cleaned_tokens))
        ann_text = ', '.join(annotations) if annotations else 'none'
        log("-> Annotations (neuron): %s" % ann_text, depth + 1)

        # Rename neuron to only have first token
        neuron.name = tokens[0]
        neuron.save()

        # Add annotations to neuron
        for a in annotations:
            # Make sure the annotation's class instance exists.
            if a not in self.annotation_cache:
                ci, created = self.orm.ClassInstance.objects.get_or_create(
                        project_id=self.p.id, name=a,
                        class_column_id=self.class_map['annotation'],
                        defaults={'user_id': neuron.link_user_id})
                self.annotation_cache[a] = ci.id
            # Get annotation ID from cache
            a_id = self.annotation_cache[a]
            # Link the annotation
            cici, created = self.orm.ClassInstanceClassInstance.objects.get_or_create(
                    project_id=self.p.id,
                    relation_id=self.relation_map['annotated_with'],
                    class_instance_a_id=neuron.id,
                    class_instance_b_id=a_id,
                    defaults={
                        'user_id': neuron.link_user_id,
                    })
            cici.save()  # update the last edited time

    def traverse(self, node, folder_annotations, path="", depth=0,
            annotations=set()):
        """ This method traverses the existing object tree to collect
        annotations which are eventually linked to neurons. If the
        'folder_annotations' option is true, the folder name will not be
        looked at and no annotation will be potentially created from it.
        """
        if node.id in self.visited_nodes:
            # Use depth + 1 directly; indent is only defined further below.
            log("Ignoring node, it has been seen before", depth + 1)
            return

        # Mark node as visited
        self.visited_nodes.add(node.id)

        # Set indentation levels for output on this level
        indent = depth + 1

        # By default, the folders on the *next* level will be looked at
        next_folder_annotations = True

        # Output and path update
        path = "%s > %s" % (path, node.name)
        log("Node: %s" % node.name, indent)
        log("-> Path: %s" % path, indent)

        # Test if we deal with a neuron
        if node.class_column_id == self.class_map['neuron']:
            self.annotate_neuron(node, depth, annotations)
        # Only look at folder names if requested
        elif folder_annotations:
            if re.match(self.fragments_pattern, node.name):
                # Fragments folder: forget all annotations
                annotations = set()
            elif node.name == 'Isolated synaptic terminals':
                # 'Isolated synaptic terminals' folder: don't add annotation
                # for it
                pass
            elif node.name == 'Staging':
                # Staging folder: the naming folders below shouldn't get
                # annotations
                next_folder_annotations = False
            else:
                # All other nodes: add trimmed name as annotation
                annotations = annotations.copy()
                annotations.add(node.name.strip())

        # Get the linked nodes, annotate them if they are neurons, and
        # traverse them
        linked_nodes = self.orm.ClassInstance.objects.filter(project=self.p.id,
                cici_via_a__relation=self.relation_map['part_of'],
                cici_via_a__class_instance_b=node).extra(select={
                    'link_user_id': 'class_instance_class_instance.user_id'
                }).order_by('id')
        for ln in linked_nodes:
            self.traverse(ln, next_folder_annotations, path, depth + 1,
                    annotations)


class Migration(DataMigration):

    def test_tracing_setup(self, orm, p, class_map, relation_map):
        """ Tests if the given project is setup for tracing. If it seems it
        should be (i.e. it has a root class and a root node instance), but is
        missing some needed things, the user is given the option to get this
        automatically fixed. If (s)he doesn't want to or the project doesn't
        appear to be a tracing project, a RuntimeError is raised.
        """
        # First check if the project qualifies for further steps by testing
        # whether it actually has a tracing root node. If not, it is skipped.
        if not ('root' in class_map and orm.ClassInstance.objects.filter(
                class_column=class_map['root'], project_id=p.id).exists()):
            raise RuntimeError("Skipping project #%s, because tracing isn't " \
                    "set up for it" % p.id)

        # Since this project is apparently a tracing project, make sure tracing
        # is set-up properly for it. If not, ask the user if missing classes and
        # relations should be created.
        setup_okay, mc, mr, mci = check_tracing_setup_detailed(p.id,
                class_map, relation_map, check_root_ci=False)
        if not setup_okay:
            indent = 1
            log("This project seems to be up for tracing in principle, but " \
                    "it isn't setup properly:", indent)
            if mc:
                log("Missing classes: %s" % ', '.join(mc), indent + 1)
            if mr:
                log("Missing relations: %s" % ', '.join(mr), indent + 1)
            if mci:
                log("Missing class instances: %s" % ', '.join(mci), indent + 1)
            log("This migration can add the missing bits, if wanted. It will " \
                    "skip this project if not.", 1)
            should_fix = None
            while should_fix not in ['yes', 'no']:
                should_fix = raw_input("Should the missing information be " \
                        "added automatically? (yes/no)")
            # Skip project if the answer isn't positive
            if should_fix != 'yes':
                raise RuntimeError("Skipping project, because of user's " \
                        "choice to not setup tracing properly.")
            # Fix setup otherwise and continue. Use the first super user
            # available to do that.
            super_user = User.objects.filter(is_superuser=True).order_by('id')[0]
            setup_tracing(p.id, super_user)
            log("The missing bits have been added.", indent)

    def forwards(self, orm):
        """ This migration will parse the data structures that form the
        object tree to create annotations to neurons. These annotations will
        eventually replace the object tree. For details on how every project
        is handled, have a look at the 'migrate_project' method.
        """
        # Return without doing anything, if there are no class instances at
        # all. In such a case there isn't anything to migrate and the question
        # below could cause confusion.
        if orm.ClassInstance.objects.all().count() == 0:
            return

        answer = None
        while answer not in ['yes', 'no', 'skip']:
            answer = raw_input("This migration will create new data (annotations)" \
                    " in the database, based on existing data. It cannot be reversed." \
                    " Please make sure you have an up-to-date backup. Only data" \
                    " related to the object tree will be changed. Do you want to" \
                    " continue? (yes/no/skip) ")
        if answer == 'skip':
            return
        if answer != 'yes':
            raise RuntimeError("Migration stopped by user")

        # Wrap all database operations in a transaction. Therefore, disable
        # Django's autocommit.
        db.start_transaction()

        try:
            # Migrate every available project
            for p in orm.Project.objects.order_by('id'):
                if p.id == settings.ONTOLOGY_DUMMY_PROJECT_ID:
                    log("Skipping special purpose project #%s: %s" % (p.id, p.title))
                    continue
                else:
                    log("Looking at project #%s: %s" % (p.id, p.title))
                class_map = get_class_to_id_map(p.id)
                relation_map = get_relation_to_id_map(p.id)

                try:
                    self.test_tracing_setup(orm, p, class_map, relation_map)
                    # If any were newly added, update the maps
                    class_map = get_class_to_id_map(p.id)
                    relation_map = get_relation_to_id_map(p.id)
                except RuntimeError as e:
                    log(e.message, 1)
                    continue

                # A project available here can be expected to be setup for
                # tracing properly. Therefore, annotations can now be created
                # for every neuron of this project. The existing object tree
                # data structure will be traversed in a similar way to how the
                # CATMAID front-end works.
                # Start at the root node and traverse all folders in there
                root_node = orm.ClassInstance.objects.filter(project=p.id,
                        class_column=class_map['root']).get()
                traverser = Traverser(orm, p, class_map, relation_map)
                traverser.run(root_node)
                # Let user know we are done with this project
                log("Finished migration of project #%s" % p.id)
            # Commit transaction if everything worked well
            db.commit_transaction()
        except Exception as e:
            db.rollback_transaction()
            log("Something went wrong, rolling back changes: %s" % e.message)
            traceback.print_exc()
            raise RuntimeError("Couldn't apply migration")

    def backwards(self, orm):
        print("This data migration cannot be reversed.")

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'catmaid.apikey': {
            'Meta': {'object_name': 'ApiKey'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'catmaid.brokenslice': {
            'Meta': {'object_name': 'BrokenSlice', 'db_table': "'broken_slice'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.IntegerField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"})
        },
        'catmaid.cardinalityrestriction': {
            'Meta': {'object_name': 'CardinalityRestriction', 'db_table': "'cardinality_restriction'"},
            'cardinality_type': ('django.db.models.fields.IntegerField', [], {}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassClass']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'catmaid.changerequest': {
            'Meta': {'object_name': 'ChangeRequest', 'db_table': "'change_request'"},
            'approve_action': ('django.db.models.fields.TextField', [], {}),
            'completion_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_recipient'", 'db_column': "'recipient_id'", 'to': "orm['auth.User']"}),
            'reject_action': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'validate_action': ('django.db.models.fields.TextField', [], {})
        },
        'catmaid.class': {
            'Meta': {'object_name': 'Class', 'db_table': "'class'"},
            'class_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.classclass': {
            'Meta': {'object_name': 'ClassClass', 'db_table': "'class_class'"},
            'class_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_a'", 'db_column': "'class_a'", 'to': "orm['catmaid.Class']"}),
            'class_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_b'", 'db_column': "'class_b'", 'to': "orm['catmaid.Class']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.classinstance': {
            'Meta': {'object_name': 'ClassInstance', 'db_table': "'class_instance'"},
            'class_column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Class']", 'db_column': "'class_id'"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.classinstanceclassinstance': {
            'Meta': {'object_name': 'ClassInstanceClassInstance', 'db_table': "'class_instance_class_instance'"},
            'class_instance_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_a'", 'db_column': "'class_instance_a'", 'to': "orm['catmaid.ClassInstance']"}),
            'class_instance_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_b'", 'db_column': "'class_instance_b'", 'to': "orm['catmaid.ClassInstance']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.concept': {
            'Meta': {'object_name': 'Concept', 'db_table': "'concept'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.connector': {
            'Meta': {'object_name': 'Connector', 'db_table': "'connector'"},
            'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connector_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'review_time': ('django.db.models.fields.DateTimeField', [], {}),
            'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.connectorclassinstance': {
            'Meta': {'object_name': 'ConnectorClassInstance', 'db_table': "'connector_class_instance'"},
            'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
            'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.constraintstosegmentmap': {
            'Meta': {'object_name': 'ConstraintsToSegmentMap'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'segments': ('catmaid.fields.IntegerArrayField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'target_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
        },
        'catmaid.dataview': {
            'Meta': {'ordering': "('position',)", 'object_name': 'DataView', 'db_table': "'data_view'"},
            'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'config': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'data_view_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.DataViewType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        'catmaid.dataviewtype': {
            'Meta': {'object_name': 'DataViewType', 'db_table': "'data_view_type'"},
            'code_type': ('django.db.models.fields.TextField', [], {}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        'catmaid.deprecatedappliedmigrations': {
            'Meta': {'object_name': 'DeprecatedAppliedMigrations', 'db_table': "'applied_migrations'"},
            'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
        },
        'catmaid.deprecatedsession': {
            'Meta': {'object_name': 'DeprecatedSession', 'db_table': "'sessions'"},
            'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'session_id': ('django.db.models.fields.CharField', [], {'max_length': '26'})
        },
        'catmaid.drawing': {
            'Meta': {'object_name': 'Drawing', 'db_table': "'drawing'"},
            'component_id': ('django.db.models.fields.IntegerField', [], {}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_x': ('django.db.models.fields.IntegerField', [], {}),
            'max_y': ('django.db.models.fields.IntegerField', [], {}),
            'min_x': ('django.db.models.fields.IntegerField', [], {}),
            'min_y': ('django.db.models.fields.IntegerField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'skeleton_id': ('django.db.models.fields.IntegerField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'svg': ('django.db.models.fields.TextField', [], {}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'z': ('django.db.models.fields.IntegerField', [], {})
        },
        'catmaid.location': {
            'Meta': {'object_name': 'Location', 'db_table': "'location'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'review_time': ('django.db.models.fields.DateTimeField', [], {}),
            'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.log': {
            'Meta': {'object_name': 'Log', 'db_table': "'log'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'freetext': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'operation_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.message': {
            'Meta': {'object_name': 'Message', 'db_table': "'message'"},
            'action': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'text': ('django.db.models.fields.TextField', [], {'default': "'New message'", 'null': 'True', 'blank': 'True'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.overlay': {
            'Meta': {'object_name': 'Overlay', 'db_table': "'overlay'"},
            'default_opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'file_extension': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_base': ('django.db.models.fields.TextField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
            'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        'catmaid.project': {
            'Meta': {'object_name': 'Project', 'db_table': "'project'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'stacks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catmaid.Stack']", 'through': "orm['catmaid.ProjectStack']", 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        'catmaid.projectstack': {
            'Meta': {'object_name': 'ProjectStack', 'db_table': "'project_stack'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'orientation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'translation': ('catmaid.fields.Double3DField', [], {'default': '(0, 0, 0)'})
        },
        'catmaid.regionofinterest': {
            'Meta': {'object_name': 'RegionOfInterest', 'db_table': "'region_of_interest'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'height': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'rotation_cw': ('django.db.models.fields.FloatField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'width': ('django.db.models.fields.FloatField', [], {}),
            'zoom_level': ('django.db.models.fields.IntegerField', [], {})
        },
        'catmaid.regionofinterestclassinstance': {
            'Meta': {'object_name': 'RegionOfInterestClassInstance', 'db_table': "'region_of_interest_class_instance'"},
            'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'region_of_interest': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.RegionOfInterest']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.relation': {
            'Meta': {'object_name': 'Relation', 'db_table': "'relation'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isreciprocal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uri': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.relationinstance': {
            'Meta': {'object_name': 'RelationInstance', 'db_table': "'relation_instance'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.restriction': {
            'Meta': {'object_name': 'Restriction', 'db_table': "'restriction'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassClass']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.segments': {
            'Meta': {'object_name': 'Segments'},
            'assembly': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']", 'null': 'True'}),
            'cost': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'direction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'origin_slice_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'randomforest_cost': ('django.db.models.fields.FloatField', [], {}),
            'segmentation_cost': ('django.db.models.fields.FloatField', [], {}),
            'segmentid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'segmenttype': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
            'target1_slice_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'target2_slice_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'target_section': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.segmenttoconstraintmap': {
            'Meta': {'object_name': 'SegmentToConstraintMap'},
            'constraint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ConstraintsToSegmentMap']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'segment_node_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'segmentid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'target_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
        },
        'catmaid.settings': {
            'Meta': {'object_name': 'Settings', 'db_table': "'settings'"},
            'key': ('django.db.models.fields.TextField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
        },
        'catmaid.skeletonlistdashboard': {
            'Meta': {'object_name': 'SkeletonlistDashboard', 'db_table': "'skeletonlist_dashboard'"},
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'skeleton_list': ('catmaid.fields.IntegerArrayField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.slices': {
            'Meta': {'object_name': 'Slices'},
            'assembly': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']", 'null': 'True'}),
            'center_x': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
            'center_y': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'flag_left': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'flag_right': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_x': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'max_y': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'min_x': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'min_y': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'node_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'sectionindex': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'size': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'slice_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
            'threshold': ('django.db.models.fields.FloatField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.stack': {
            'Meta': {'object_name': 'Stack', 'db_table': "'stack'"},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'dimension': ('catmaid.fields.Integer3DField', [], {}),
            'file_extension': ('django.db.models.fields.TextField', [], {'default': "'jpg'", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_base': ('django.db.models.fields.TextField', [], {}),
            'metadata': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'num_zoom_levels': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'resolution': ('catmaid.fields.Double3DField', [], {}),
            'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
            'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'trakem2_project': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'catmaid.stacksliceinfo': {
            'Meta': {'object_name': 'StackSliceInfo'},
            'file_extension': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slice_base_path': ('django.db.models.fields.TextField', [], {}),
            'slice_base_url': ('django.db.models.fields.TextField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"})
        },
        'catmaid.textlabel': {
            'Meta': {'object_name': 'Textlabel', 'db_table': "'textlabel'"},
            'colour': ('catmaid.fields.RGBAField', [], {'default': '(1, 0.5, 0, 1)'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'font_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'font_size': ('django.db.models.fields.FloatField', [], {'default': '32'}),
            'font_style': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'default': "'Edit this text ...'"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        'catmaid.textlabellocation': {
            'Meta': {'object_name': 'TextlabelLocation', 'db_table': "'textlabel_location'"},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'textlabel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Textlabel']"})
        },
        'catmaid.treenode': {
            'Meta': {'object_name': 'Treenode', 'db_table': "'treenode'"},
            'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'treenode_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['catmaid.Treenode']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'radius': ('django.db.models.fields.FloatField', [], {}),
            'review_time': ('django.db.models.fields.DateTimeField', [], {}),
            'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.treenodeclassinstance': {
            'Meta': {'object_name': 'TreenodeClassInstance', 'db_table': "'treenode_class_instance'"},
            'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.treenodeconnector': {
            'Meta': {'object_name': 'TreenodeConnector', 'db_table': "'treenode_connector'"},
            'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
            'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'color': ('catmaid.fields.RGBAField', [], {'default': '(1.0, 0.06326473260249865, 0.9259908104182738, 1)'}),
            'display_stack_reference_lines': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'independent_ontology_workspace_is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'inverse_mouse_wheel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_cropping_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_ontology_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_segmentation_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_tagging_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_text_label_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_tracing_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }

    complete_apps = ['catmaid']
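
# Illustrative sketch (assumption, not part of the migration): once this
# migration has run, the annotations created by the Traverser can be read
# back through the same 'annotated_with' relation it writes above. The helper
# name is hypothetical and mirrors the query style used in traverse().
def _get_neuron_annotations(orm, project_id, neuron_id, relation_map):
    """Return the names of all annotations linked to one neuron."""
    return list(orm.ClassInstance.objects.filter(
        project_id=project_id,
        cici_via_b__relation=relation_map['annotated_with'],
        cici_via_b__class_instance_a=neuron_id,
    ).values_list('name', flat=True))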
htem/CATMAID
django/applications/catmaid/migrations/0019_create_annotations_for_object_tree_entries.py
Python
agpl-3.0
56,229
[ "NEURON" ]
0672e49d0a3e51215c9edcb54c4f98c7e18c93ea2fdd168f8dba3b428b7a3d84
"""API for Home Connect bound to HASS OAuth.""" from asyncio import run_coroutine_threadsafe import logging import homeconnect from homeconnect.api import HomeConnectError from homeassistant import config_entries, core from homeassistant.const import DEVICE_CLASS_TIMESTAMP, TIME_SECONDS, UNIT_PERCENTAGE from homeassistant.helpers import config_entry_oauth2_flow from homeassistant.helpers.dispatcher import dispatcher_send from .const import ( BSH_ACTIVE_PROGRAM, BSH_POWER_OFF, BSH_POWER_STANDBY, SIGNAL_UPDATE_ENTITIES, ) _LOGGER = logging.getLogger(__name__) class ConfigEntryAuth(homeconnect.HomeConnectAPI): """Provide Home Connect authentication tied to an OAuth2 based config entry.""" def __init__( self, hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry, implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation, ): """Initialize Home Connect Auth.""" self.hass = hass self.config_entry = config_entry self.session = config_entry_oauth2_flow.OAuth2Session( hass, config_entry, implementation ) super().__init__(self.session.token) self.devices = [] def refresh_tokens(self) -> dict: """Refresh and return new Home Connect tokens using Home Assistant OAuth2 session.""" run_coroutine_threadsafe( self.session.async_ensure_token_valid(), self.hass.loop ).result() return self.session.token def get_devices(self): """Get a dictionary of devices.""" appl = self.get_appliances() devices = [] for app in appl: if app.type == "Dryer": device = Dryer(self.hass, app) elif app.type == "Washer": device = Washer(self.hass, app) elif app.type == "Dishwasher": device = Dishwasher(self.hass, app) elif app.type == "FridgeFreezer": device = FridgeFreezer(self.hass, app) elif app.type == "Oven": device = Oven(self.hass, app) elif app.type == "CoffeeMaker": device = CoffeeMaker(self.hass, app) elif app.type == "Hood": device = Hood(self.hass, app) elif app.type == "Hob": device = Hob(self.hass, app) else: _LOGGER.warning("Appliance type %s not implemented.", app.type) continue devices.append({"device": device, "entities": device.get_entity_info()}) self.devices = devices return devices class HomeConnectDevice: """Generic Home Connect device.""" # for some devices, this is instead BSH_POWER_STANDBY # see https://developer.home-connect.com/docs/settings/power_state power_off_state = BSH_POWER_OFF def __init__(self, hass, appliance): """Initialize the device class.""" self.hass = hass self.appliance = appliance def initialize(self): """Fetch the info needed to initialize the device.""" try: self.appliance.get_status() except (HomeConnectError, ValueError): _LOGGER.debug("Unable to fetch appliance status. Probably offline.") try: self.appliance.get_settings() except (HomeConnectError, ValueError): _LOGGER.debug("Unable to fetch settings. Probably offline.") try: program_active = self.appliance.get_programs_active() except (HomeConnectError, ValueError): _LOGGER.debug("Unable to fetch active programs. 

class HomeConnectDevice:
    """Generic Home Connect device."""

    # for some devices, this is instead BSH_POWER_STANDBY
    # see https://developer.home-connect.com/docs/settings/power_state
    power_off_state = BSH_POWER_OFF

    def __init__(self, hass, appliance):
        """Initialize the device class."""
        self.hass = hass
        self.appliance = appliance

    def initialize(self):
        """Fetch the info needed to initialize the device."""
        try:
            self.appliance.get_status()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch appliance status. Probably offline.")
        try:
            self.appliance.get_settings()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch settings. Probably offline.")
        try:
            program_active = self.appliance.get_programs_active()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch active programs. Probably offline.")
            program_active = None
        if program_active and "key" in program_active:
            self.appliance.status[BSH_ACTIVE_PROGRAM] = {"value": program_active["key"]}
        self.appliance.listen_events(callback=self.event_callback)

    def event_callback(self, appliance):
        """Handle event."""
        _LOGGER.debug("Update triggered on %s", appliance.name)
        _LOGGER.debug(self.appliance.status)
        dispatcher_send(self.hass, SIGNAL_UPDATE_ENTITIES, appliance.haId)


class DeviceWithPrograms(HomeConnectDevice):
    """Device with programs."""

    PROGRAMS = []

    def get_programs_available(self):
        """Get the available programs."""
        return self.PROGRAMS

    def get_program_switches(self):
        """Get a list with info about program switches.

        There will be one switch for each program.
        """
        programs = self.get_programs_available()
        return [{"device": self, "program_name": p["name"]} for p in programs]

    def get_program_sensors(self):
        """Get a list with info about program sensors.

        There will be one of each of the three types of sensors
        for each device.
        """
        sensors = {
            "Remaining Program Time": (None, None, DEVICE_CLASS_TIMESTAMP, 1),
            "Duration": (TIME_SECONDS, "mdi:update", None, 1),
            "Program Progress": (UNIT_PERCENTAGE, "mdi:progress-clock", None, 1),
        }
        return [
            {
                "device": self,
                "desc": k,
                "unit": unit,
                "key": "BSH.Common.Option.{}".format(k.replace(" ", "")),
                "icon": icon,
                "device_class": device_class,
                "sign": sign,
            }
            for k, (unit, icon, device_class, sign) in sensors.items()
        ]
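
# Illustrative sketch (assumption, not part of the integration): the shape of
# one entry produced by DeviceWithPrograms.get_program_sensors() above. The
# helper name is hypothetical and only exists to document the structure.
def _example_program_sensor_description(device):
    """Return the description built for the "Program Progress" sensor."""
    return {
        "device": device,
        "desc": "Program Progress",
        "unit": UNIT_PERCENTAGE,
        "key": "BSH.Common.Option.ProgramProgress",
        "icon": "mdi:progress-clock",
        "device_class": None,
        "sign": 1,
    }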
"Dishcare.Dishwasher.Program.Intensiv45"}, {"name": "Dishcare.Dishwasher.Program.AutoHalfLoad"}, {"name": "Dishcare.Dishwasher.Program.IntensivPower"}, {"name": "Dishcare.Dishwasher.Program.MagicDaily"}, {"name": "Dishcare.Dishwasher.Program.Super60"}, {"name": "Dishcare.Dishwasher.Program.Kurz60"}, {"name": "Dishcare.Dishwasher.Program.ExpressSparkle65"}, {"name": "Dishcare.Dishwasher.Program.MachineCare"}, {"name": "Dishcare.Dishwasher.Program.SteamFresh"}, {"name": "Dishcare.Dishwasher.Program.MaximumCleaning"}, ] def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" door_entity = self.get_door_entity() program_sensors = self.get_program_sensors() program_switches = self.get_program_switches() return { "binary_sensor": [door_entity], "switch": program_switches, "sensor": program_sensors, } class Oven(DeviceWithDoor, DeviceWithPrograms): """Oven class.""" PROGRAMS = [ {"name": "Cooking.Oven.Program.HeatingMode.PreHeating"}, {"name": "Cooking.Oven.Program.HeatingMode.HotAir"}, {"name": "Cooking.Oven.Program.HeatingMode.TopBottomHeating"}, {"name": "Cooking.Oven.Program.HeatingMode.PizzaSetting"}, {"name": "Cooking.Oven.Program.Microwave.600Watt"}, ] power_off_state = BSH_POWER_STANDBY def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" door_entity = self.get_door_entity() program_sensors = self.get_program_sensors() program_switches = self.get_program_switches() return { "binary_sensor": [door_entity], "switch": program_switches, "sensor": program_sensors, } class Washer(DeviceWithDoor, DeviceWithPrograms): """Washer class.""" PROGRAMS = [ {"name": "LaundryCare.Washer.Program.Cotton"}, {"name": "LaundryCare.Washer.Program.Cotton.CottonEco"}, {"name": "LaundryCare.Washer.Program.EasyCare"}, {"name": "LaundryCare.Washer.Program.Mix"}, {"name": "LaundryCare.Washer.Program.DelicatesSilk"}, {"name": "LaundryCare.Washer.Program.Wool"}, {"name": "LaundryCare.Washer.Program.Sensitive"}, {"name": "LaundryCare.Washer.Program.Auto30"}, {"name": "LaundryCare.Washer.Program.Auto40"}, {"name": "LaundryCare.Washer.Program.Auto60"}, {"name": "LaundryCare.Washer.Program.Chiffon"}, {"name": "LaundryCare.Washer.Program.Curtains"}, {"name": "LaundryCare.Washer.Program.DarkWash"}, {"name": "LaundryCare.Washer.Program.Dessous"}, {"name": "LaundryCare.Washer.Program.Monsoon"}, {"name": "LaundryCare.Washer.Program.Outdoor"}, {"name": "LaundryCare.Washer.Program.PlushToy"}, {"name": "LaundryCare.Washer.Program.ShirtsBlouses"}, {"name": "LaundryCare.Washer.Program.SportFitness"}, {"name": "LaundryCare.Washer.Program.Towels"}, {"name": "LaundryCare.Washer.Program.WaterProof"}, ] def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" door_entity = self.get_door_entity() program_sensors = self.get_program_sensors() program_switches = self.get_program_switches() return { "binary_sensor": [door_entity], "switch": program_switches, "sensor": program_sensors, } class CoffeeMaker(DeviceWithPrograms): """Coffee maker class.""" PROGRAMS = [ {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Espresso"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.EspressoMacchiato"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Coffee"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Cappuccino"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.LatteMacchiato"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.CaffeLatte"}, {"name": 
"ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Americano"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.EspressoDoppio"}, {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.FlatWhite"}, {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Galao"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.MilkFroth"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.WarmMilk"}, {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Ristretto"}, {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Cortado"}, ] power_off_state = BSH_POWER_STANDBY def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" program_sensors = self.get_program_sensors() program_switches = self.get_program_switches() return {"switch": program_switches, "sensor": program_sensors} class Hood(DeviceWithPrograms): """Hood class.""" PROGRAMS = [ {"name": "Cooking.Common.Program.Hood.Automatic"}, {"name": "Cooking.Common.Program.Hood.Venting"}, {"name": "Cooking.Common.Program.Hood.DelayedShutOff"}, ] def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" program_sensors = self.get_program_sensors() program_switches = self.get_program_switches() return {"switch": program_switches, "sensor": program_sensors} class FridgeFreezer(DeviceWithDoor): """Fridge/Freezer class.""" def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" door_entity = self.get_door_entity() return {"binary_sensor": [door_entity]} class Hob(DeviceWithPrograms): """Hob class.""" PROGRAMS = [{"name": "Cooking.Hob.Program.PowerLevelMode"}] def get_entity_info(self): """Get a dictionary with infos about the associated entities.""" program_sensors = self.get_program_sensors() program_switches = self.get_program_switches() return {"switch": program_switches, "sensor": program_sensors}
nkgilley/home-assistant
homeassistant/components/home_connect/api.py
Python
apache-2.0
14,340
[ "ESPResSo" ]
9c4c7255b9fe94e41bfdea0c8df13dd50711d821faa11e52ec6a0d20de8da77f
import numpy as np
from scipy.fftpack import fft, ifft, fftshift

__all__ = ['cwt', 'ccwt', 'icwt', 'SDG', 'Morlet']

class MotherWavelet(object):
    """Class for MotherWavelets.

    Contains methods related to mother wavelets.  Also used to ensure that new
    mother wavelet objects contain the minimum requirements to be used in the
    cwt related functions.

    """

    @staticmethod
    def get_coefs(self):
        """Raise error if method for calculating mother wavelet coefficients is
        missing!

        """
        raise NotImplementedError('get_coefs needs to be implemented for the mother wavelet')

    @staticmethod
    def get_coi_coef(sampf):
        """Raise error if Cone of Influence coefficient is not set in
        subclass wavelet.

        To follow the convention in the literature, please define your
        COI coef as a function of period, not scale - this will ensure
        compatibility with the scalogram method.

        """
        raise NotImplementedError('coi_coef needs to be implemented in subclass wavelet')

    #add methods for computing cone of influence and mask
    def get_coi(self):
        """Compute cone of influence."""
        y1 = self.coi_coef * np.arange(0, self.len_signal / 2)
        y2 = -self.coi_coef * np.arange(0, self.len_signal / 2) + y1[-1]
        coi = np.r_[y1, y2]
        self.coi = coi
        return coi

    def get_mask(self):
        """Get mask for cone of influence.

        Sets self.mask as an array of bools for use in np.ma.array('', mask=mask)

        """
        mask = np.ones(self.coefs.shape)
        masks = self.coi_coef * self.scales
        for s in range(0, len(self.scales)):
            if (s != 0) and (int(np.ceil(masks[s])) < mask.shape[1]):
                mask[s, int(np.ceil(masks[s])):-int(np.ceil(masks[s]))] = 0
        self.mask = mask.astype(bool)
        return self.mask

class SDG(MotherWavelet):
    """Class for the SDG MotherWavelet (a subclass of MotherWavelet).

    SDG(self, len_signal = None, pad_to = None, scales = None, sampf = 1,
        normalize = True, fc = 'bandpass')

    Parameters
    ----------
    len_signal : int
        Length of time series to be decomposed.

    pad_to : int
        Pad time series to a total length `pad_to` using zero padding (note,
        the signal will be zero padded automatically during continuous wavelet
        transform if pad_to is set). This is used in the fft function when
        performing the convolution of the wavelet and mother wavelet in
        Fourier space.

    scales : array
        Array of scales used to initialize the mother wavelet.

    sampf : float
        Sample frequency of the time series to be decomposed.

    normalize : bool
        If True, the normalized version of the mother wavelet will be used
        (i.e. the mother wavelet will have unit energy).

    fc : string
        Characteristic frequency - use the 'bandpass' or 'center' frequency of
        the Fourier spectrum of the mother wavelet to relate scale to period
        (default is 'bandpass').

    Returns
    -------
    Returns an instance of the MotherWavelet class which is used in the cwt
    and icwt functions.

    Examples
    --------
    Create instance of SDG mother wavelet, normalized, using 10 scales and the
    center frequency of the Fourier transform as the characteristic frequency.
    Then, perform the continuous wavelet transform and plot the scalogram.

    # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
    # data = numpy.sin(x**2)
    # scales = numpy.arange(10)
    #
    # mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10),
    #                      normalize = True, fc = 'center')
    # wavelet = cwt(data, mother_wavelet)
    # wavelet.scalogram()

    Notes
    -----
    None

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook.  Taylor
    and Francis Group, New York/London.  353 pp.
""" def __init__(self,len_signal=None,pad_to=None,scales=None,sampf=1,normalize=True, fc = 'bandpass'): """Initilize SDG mother wavelet""" self.name='second degree of a Gaussian (mexican hat)' self.sampf = sampf self.scales = scales self.len_signal = len_signal self.normalize = normalize #set total length of wavelet to account for zero padding if pad_to is None: self.len_wavelet = len_signal else: self.len_wavelet = pad_to #set admissibility constant if normalize: self.cg = 4 * np.sqrt(np.pi) / 3. else: self.cg = np.pi #define characteristic frequency if fc is 'bandpass': self.fc = np.sqrt(5./2.) * self.sampf/(2 * np.pi) elif fc is 'center': self.fc = np.sqrt(2.) * self.sampf / (2 * np.pi) else: raise CharacteristicFrequencyError("fc = %s not defined"%(fc,)) # coi_coef defined under the assumption that period is used, not scale self.coi_coef = 2 * np.pi * np.sqrt(2. / 5.) * self.fc # Torrence and # Compo 1998 # compute coefficients for the dilated mother wavelet self.coefs = self.get_coefs() def get_coefs(self): """Calculate the coefficients for the SDG mother wavelet""" # Create array containing values used to evaluate the wavelet function xi=np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.) # find mother wavelet coefficients at each scale xsd = -xi * xi / (self.scales[:,np.newaxis] * self.scales[:,np.newaxis]) if self.normalize is True: c=2. / (np.sqrt(3) * np.power(np.pi, 0.25)) else: c=1. mw = c * (1. + xsd) * np.exp(xsd / 2.) self.coefs = mw return mw class Morlet(MotherWavelet): """Class for the Morlet MotherWavelet (a subclass of MotherWavelet). Morlet(self, len_signal = None, pad_to = None, scales = None, sampf = 1, f0 = 0.849) Parameters ---------- len_signal : int Length of time series to be decomposed. pad_to : int Pad time series to a total length `pad_to` using zero padding (note, the signal will be zero padded automatically during continuous wavelet transform if pad_to is set). This is used in the fft function when performing the convolution of the wavelet and mother wavelet in Fourier space. scales : array Array of scales used to initialize the mother wavelet. sampf : float Sample frequency of the time series to be decomposed. f0 : float Central frequency of the Morlet mother wavelet. The Fourier spectrum of the Morlet wavelet appears as a Gaussian centered on f0. f0 defaults to a value of 0.849 (the angular frequency would be ~5.336). Returns ------- Returns an instance of the MotherWavelet class which is used in the cwt and icwt functions. Examples -------- Create instance of Morlet mother wavelet using 10 scales, perform the continuous wavelet transform, and plot the resulting scalogram. # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.) # data = numpy.sin(x**2) # scales = numpy.arange(10) # # mother_wavelet = Morlet(len_signal=len(data), scales = np.arange(10)) # wavelet = cwt(data, mother_wavelet) # wave_coefs.scalogram() Notes ----- * Morlet wavelet is defined as having unit energy, so the `normalize` flag will always be set to True. * The Morlet wavelet will always use f0 as it's characteristic frequency, so fc is set as f0. References ---------- Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor and Francis Group, New York/London. 353 pp. 
""" def __init__(self, len_signal=None, pad_to=None, scales=None, sampf=1, normalize=True, f0=0.849): """Initilize Morlet mother wavelet.""" from scipy.integrate import trapz from scipy.integrate import quad, Inf self.sampf = sampf self.scales = scales self.len_signal = len_signal self.normalize = True self.name = 'Morlet' # set total length of wavelet to account for zero padding if pad_to is None: self.len_wavelet = len_signal else: self.len_wavelet = pad_to # define characteristic frequency self.fc = f0 # Cone of influence coefficient self.coi_coef = 2. * self.sampf / (self.fc + np.sqrt(2. + self.fc**2) * np.sqrt(2)); #Torrence and Compo 1998 (in code) # set admissibility constant # based on the simplified Morlet wavelet energy spectrum # in Addison (2002), eqn (2.39) - should be ok for f0 >0.84 # FIXED using quad 04/01/2011 #f = np.arange(0.001, 50, 0.001) #y = 2. * np.sqrt(np.pi) * np.exp(-np.power((2. * np.pi * f - # 2. * np.pi * self.fc), 2)) #self.cg = trapz(y[1:] / f[1:]) * (f[1]-f[0]) self.cg = quad(lambda x : 2. * np.sqrt(np.pi) * np.exp(-np.power((2. * np.pi * x - 2. * np.pi * f0), 2)), -Inf, Inf)[0] # compute coefficients for the dilated mother wavelet self.coefs = self.get_coefs() def get_coefs(self): """Calculate the coefficients for the Morlet mother wavelet.""" # Create array containing values used to evaluate the wavelet function xi=np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.) # find mother wavelet coefficients at each scale xsd = xi / (self.scales[:,np.newaxis]) mw = np.power(np.pi,-0.25) * \ (np.exp(np.complex(1j) * 2. * np.pi * self.fc * xsd) - \ np.exp(-np.power((2. * np.pi * self.fc), 2) / 2.)) * \ np.exp(-np.power(xsd, 2) / 2.) self.coefs = mw return mw class Wavelet(object): """Class for Wavelet object. The Wavelet object holds the wavelet coefficients as well as information on how they were obtained. """ def __init__(self, wt, wavelet, weighting_function, signal_dtype, deep_copy=True): """Initialization of Wavelet object. Parameters ---------- wt : array Array of wavelet coefficients. wavelet : object Mother wavelet object used in the creation of `wt`. weighting_function : function Function used in the creation of `wt`. signal_dtype : dtype dtype of signal used in the creation of `wt`. deep_copy : bool If true (default), the mother wavelet object used in the creation of the wavelet object will be fully copied and accessible through wavelet.motherwavelet; if false, wavelet.motherwavelet will be a reference to the motherwavelet object (that is, if you change the mother wavelet object, you will see the changes when accessing the mother wavelet through the wavelet object - this is NOT good for tracking how the wavelet transform was computed, but setting deep_copy to False will save memory). Returns ------- Returns an instance of the Wavelet class. """ from copy import deepcopy self.coefs = wt[:,0:wavelet.len_signal] if wavelet.len_signal != wavelet.len_wavelet: self._pad_coefs = wt[:,wavelet.len_signal:] else: self._pad_coefs = None if deep_copy: self.motherwavelet = deepcopy(wavelet) else: self.motherwavelet = wavelet self.weighting_function = weighting_function self._signal_dtype = signal_dtype def get_gws(self): """Calculate Global Wavelet Spectrum. References ---------- Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bulletin of the American Meteorological Society, 79, 1, pp. 61-78. """ gws = self.get_wavelet_var() return gws def get_wes(self): """Calculate Wavelet Energy Spectrum. References ---------- Torrence, C., and G. 
        P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bulletin of
          the American Meteorological Society, 79, 1, pp. 61-78.
        """

        from scipy.integrate import trapz

        coef = 1. / (self.motherwavelet.fc * self.motherwavelet.cg)
        wes = coef * trapz(np.power(np.abs(self.coefs), 2), axis=1)

        return wes

    def get_wps(self):
        """Calculate Wavelet Power Spectrum.

        References
        ----------
        Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
          Analysis. Bulletin of the American Meteorological Society, 79, 1,
          pp. 61-78.
        """

        wps = (1. / self.motherwavelet.len_signal) * self.get_wes()

        return wps

    def get_wavelet_var(self):
        """Calculate Wavelet Variance (a.k.a. the Global Wavelet Spectrum of
        Torrence and Compo (1998)).

        References
        ----------
        Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
          Analysis. Bulletin of the American Meteorological Society, 79, 1,
          pp. 61-78.
        """

        coef = self.motherwavelet.cg * self.motherwavelet.fc
        wvar = (coef / self.motherwavelet.len_signal) * self.get_wes()

        return wvar

    def scalogram(self, show_coi=False, show_wps=False, ts=None, time=None,
                  use_period=True, ylog_base=None, xlog_base=None,
                  origin='top', figname=None):
        """Scalogram plotting routine.

        Creates a simple scalogram, with optional wavelet power spectrum and
        time series plots of the transformed signal.

        Parameters
        ----------
        show_coi : bool
            Set to True to see Cone of Influence

        show_wps : bool
            Set to True to see the Wavelet Power Spectrum

        ts : array
            1D array containing time series data used in wavelet transform.
            If set, time series will be plotted.

        time : array of datetime objects
            1D array containing time information

        use_period : bool
            Set to True to see figures use period instead of scale

        ylog_base : float
            If a log scale is desired, set `ylog_base` as float. (for log 10,
            set ylog_base = 10)

        xlog_base : float
            If a log scale is desired, set `xlog_base` as float. (for log 10,
            set xlog_base = 10) *note that this option is only valid for the
            wavelet power spectrum figure.

        origin : 'top' or 'bottom'
            Set origin of scale axis to top or bottom of figure

        figname : string
            Filename the figure is saved to; if None, the figure is shown
            interactively.

        Returns
        -------
        None

        Examples
        --------
        Create instance of SDG mother wavelet, normalized, using 10 scales and
        the center frequency of the Fourier transform as the characteristic
        frequency. Then, perform the continuous wavelet transform and plot the
        scalogram.

        # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
        # data = numpy.sin(x**2)
        # scales = numpy.arange(10)
        #
        # mother_wavelet = SDG(len_signal=len(data), scales=np.arange(10),
        #                      normalize=True, fc='center')
        # wave_coefs = cwt(data, mother_wavelet)
        # wave_coefs.scalogram(origin='bottom')
        """

        import matplotlib.pyplot as plt
        import matplotlib.cm as cm

        if ts is not None:
            show_ts = True
        else:
            show_ts = False

        if not show_wps and not show_ts:
            # only show scalogram
            figrow = 1
            figcol = 1
        elif show_wps and not show_ts:
            # show scalogram and wps
            figrow = 1
            figcol = 4
        elif not show_wps and show_ts:
            # show scalogram and ts
            figrow = 2
            figcol = 1
        else:
            # show scalogram, wps, and ts
            figrow = 2
            figcol = 4

        if time is None:
            x = np.arange(self.motherwavelet.len_signal)
        else:
            x = time

        if use_period:
            y = self.motherwavelet.scales / self.motherwavelet.fc
        else:
            y = self.motherwavelet.scales

        fig = plt.figure(figsize=(16, 12), dpi=160)
        ax1 = fig.add_subplot(figrow, figcol, 1)

        # if show wps, give 3/4 space to scalogram, 1/4 to wps
        if show_wps:
            # create temp axis at 3 or 4 col of row 1
            axt = fig.add_subplot(figrow, figcol, 3)
            # get location of axtmp and ax1
            axt_pos = axt.get_position()
            ax1_pos = ax1.get_position()
            axt_points = axt_pos.get_points()
            ax1_points = ax1_pos.get_points()
            # set axt_pos left bound to that of ax1
            axt_points[0][0] = ax1_points[0][0]
            ax1.set_position(axt_pos)
            fig.delaxes(axt)

        if show_coi:
            # coi_coef is defined using the assumption that you are using
            # period, not scale, in plotting - this handles that behavior
            if use_period:
                coi = (self.motherwavelet.get_coi() / self.motherwavelet.fc /
                       self.motherwavelet.sampf)
            else:
                coi = self.motherwavelet.get_coi()

            coi[coi == 0] = y.min() - 0.1 * y.min()
            # shade the area outside the cone of influence
            # (pylab.poly_between was removed from matplotlib; fill_between
            # produces the same shaded polygon)
            ax1.fill_between(np.arange(0, len(coi)), coi, np.max(y),
                             color='k', alpha=0.4, zorder=2)

        contf = ax1.contourf(x, y, np.abs(self.coefs)**2)
        fig.colorbar(contf, ax=ax1, orientation='vertical', format='%2.1f')

        if ylog_base is not None:
            ax1.axes.set_yscale('log', basey=ylog_base)

        if origin == 'top':
            ax1.set_ylim((y[-1], y[0]))
        elif origin == 'bottom':
            ax1.set_ylim((y[0], y[-1]))
        else:
            raise OriginError('`origin` must be set to "top" or "bottom"')

        ax1.set_xlim((x[0], x[-1]))
        ax1.set_title('scalogram')

        if use_period:
            ax1.set_ylabel('period')
            ax1.set_xlabel('time')
        else:
            ax1.set_ylabel('scales')
            if time is not None:
                ax1.set_xlabel('time')
            else:
                ax1.set_xlabel('sample')

        if show_wps:
            ax2 = fig.add_subplot(figrow, figcol, 4, sharey=ax1)
            if use_period:
                ax2.plot(self.get_wps(), y, 'k')
            else:
                ax2.plot(self.motherwavelet.fc * self.get_wps(), y, 'k')

            if ylog_base is not None:
                ax2.axes.set_yscale('log', basey=ylog_base)
            if xlog_base is not None:
                # note: the x axis takes `basex`, not `basey`
                ax2.axes.set_xscale('log', basex=xlog_base)
            if origin == 'top':
                ax2.set_ylim((y[-1], y[0]))
            else:
                ax2.set_ylim((y[0], y[-1]))
            if use_period:
                ax2.set_ylabel('period')
            else:
                ax2.set_ylabel('scales')
            ax2.grid()
            ax2.set_title('wavelet power spectrum')

        if show_ts:
            ax3 = fig.add_subplot(figrow, 2, 3, sharex=ax1)
            ax3.plot(x, ts)
            ax3.set_xlim((x[0], x[-1]))
            ax3.legend(['time series'])
            ax3.grid()
            # align time series fig with scalogram fig
            t = ax3.get_position()
            ax3pos = t.get_points()
            ax3pos[1][0] = ax1.get_position().get_points()[1][0]
            t.set_points(ax3pos)
            ax3.set_position(t)
            if (time is not None) or use_period:
                ax3.set_xlabel('time')
            else:
                ax3.set_xlabel('sample')

        if figname is None:
            plt.show()
        else:
            plt.savefig(figname)
            plt.close('all')

def cwt(x, wavelet, weighting_function=lambda x: x**(-0.5), deep_copy=True):
    """Computes the continuous wavelet transform of x using the mother
    wavelet `wavelet`.

    This function computes the continuous wavelet transform of x using an
    instance of a mother wavelet object.

    The cwt is defined as:

        T(a,b) = w(a) integral(-inf,inf)(x(t) * psi*{(t-b)/a} dt

    which is a convolution. In this algorithm, the convolution in the time
    domain is implemented as a multiplication in the Fourier domain.

    Parameters
    ----------
    x : 1D array
        Time series to be transformed by the cwt

    wavelet : Instance of the MotherWavelet class
        Instance of the MotherWavelet class for a particular wavelet family

    weighting_function : function
        Function used to weight the wavelet coefficients at each scale.
        Typically w(a) = a^(-0.5) is chosen, as it ensures that the wavelets
        at every scale have the same energy.

    deep_copy : bool
        If true (default), the mother wavelet object used in the creation of
        the wavelet object will be fully copied and accessible through
        wavelet.motherwavelet; if false, wavelet.motherwavelet will be a
        reference to the motherwavelet object (that is, if you change the
        mother wavelet object, you will see the changes when accessing the
        mother wavelet through the wavelet object - this is NOT good for
        tracking how the wavelet transform was computed, but setting
        deep_copy to False will save memory).

    Returns
    -------
    Returns an instance of the Wavelet class. The coefficients of the
    transform can be obtained from the `coefs` attribute
    (i.e. wavelet.coefs).

    Examples
    --------
    Create instance of SDG mother wavelet, normalized, using 10 scales and
    the center frequency of the Fourier transform as the characteristic
    frequency. Then, perform the continuous wavelet transform and plot the
    scalogram.

    # x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
    # data = numpy.sin(x**2)
    # scales = numpy.arange(10)
    #
    # mother_wavelet = SDG(len_signal=len(data), scales=np.arange(10),
    #                      normalize=True, fc='center')
    # wave_coefs = cwt(data, mother_wavelet)
    # wave_coefs.scalogram()

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor
      and Francis Group, New York/London. 353 pp.
    """

    signal_dtype = x.dtype

    if len(x) < wavelet.len_wavelet:
        n = len(x)
        x = np.resize(x, (wavelet.len_wavelet,))
        x[n:] = 0

    # Transform the signal and mother wavelet into the Fourier domain
    xf = fft(x)
    mwf = fft(wavelet.coefs.conj(), axis=1)

    # Convolve (multiply in Fourier space)
    wt_tmp = ifft(mwf * xf[np.newaxis, :], axis=1)

    # shift output from ifft and multiply by weighting function
    wt = fftshift(wt_tmp, axes=[1]) * weighting_function(
        wavelet.scales[:, np.newaxis])

    # if mother wavelet and signal are real, only keep real part of transform
    wt = wt.astype(np.common_type(wavelet.coefs, x))

    return Wavelet(wt, wavelet, weighting_function, signal_dtype, deep_copy)

def ccwt(x1, x2, wavelet):
    """Compute the continuous cross-wavelet transform of 'x1' and 'x2' using
    the mother wavelet 'wavelet', which is an instance of the MotherWavelet
    class.

    Parameters
    ----------
    x1, x2 : 1D array
        Time series used to compute cross-wavelet transform

    wavelet : Instance of the MotherWavelet class
        Instance of the MotherWavelet class for a particular wavelet family

    Returns
    -------
    Returns the array of cross-wavelet transform coefficients.
    """

    # operate on the coefficient arrays of the two transforms (cwt returns
    # Wavelet objects, which cannot be multiplied directly)
    xwt = cwt(x1, wavelet).coefs * np.conjugate(cwt(x2, wavelet).coefs)

    return xwt

def icwt(wavelet):
    """Compute the inverse continuous wavelet transform.

    Parameters
    ----------
    wavelet : Instance of the Wavelet class
        Instance of the Wavelet class holding the transform coefficients and
        the mother wavelet used to compute them.

    Examples
    --------
    Use the Morlet mother wavelet to perform wavelet transform on 'data',
    then use icwt to compute the inverse wavelet transform to come up with
    an estimate of data ('data2').
    Note that data2 is not exactly equal to data.

    # import matplotlib.pyplot as plt
    # from cwt import SDG, Morlet, cwt, icwt
    # import numpy as np
    #
    # x = np.arange(0,2*np.pi,np.pi/64)
    # data = np.sin(8*x)
    # scales = np.arange(0.5,17)
    #
    # mother_wavelet = Morlet(len_signal=len(data), scales=scales)
    # wave_coefs = cwt(data, mother_wavelet)
    # data2 = icwt(wave_coefs)
    #
    # plt.plot(data)
    # plt.plot(data2)
    # plt.show()

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor
      and Francis Group, New York/London. 353 pp.
    """

    from scipy.integrate import trapz

    # if original wavelet was created using padding, make sure to include
    # information that is missing after truncation (see self.coefs under
    # __init__ in class Wavelet)
    if wavelet.motherwavelet.len_signal != wavelet.motherwavelet.len_wavelet:
        full_wc = np.c_[wavelet.coefs, wavelet._pad_coefs]
    else:
        full_wc = wavelet.coefs

    # get wavelet coefficients and take fft
    wcf = fft(full_wc, axis=1)

    # get mother wavelet coefficients and take fft
    mwf = fft(wavelet.motherwavelet.coefs, axis=1)

    # perform inverse continuous wavelet transform and make sure the result
    # is the same type (real or complex) as the original data used in the
    # transform
    x = (1. / wavelet.motherwavelet.cg) * trapz(
        fftshift(ifft(wcf * mwf, axis=1), axes=[1]) /
        (wavelet.motherwavelet.scales[:, np.newaxis]**2),
        dx=1. / wavelet.motherwavelet.sampf, axis=0)

    return x[0:wavelet.motherwavelet.len_signal].astype(wavelet._signal_dtype)
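
# --- Usage sketch (added for illustration; not part of the original module).
# A minimal end-to-end round trip through cwt/icwt, run only when this file is
# executed directly. It assumes fft/ifft/fftshift are imported at module level
# as used above; the residual check is a sanity probe, not a strict test.
if __name__ == '__main__':
    demo_x = np.arange(0, 2 * np.pi, np.pi / 64)
    demo_data = np.sin(8 * demo_x)
    demo_scales = np.arange(0.5, 17)

    demo_mother = Morlet(len_signal=len(demo_data), scales=demo_scales)
    demo_coefs = cwt(demo_data, demo_mother)
    demo_recon = icwt(demo_coefs)

    # reconstruction is approximate; report the worst-case residual
    print('max reconstruction error: %g' %
          np.max(np.abs(demo_data - demo_recon)))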
Unidata/pyCWT
cwt.py
Python
bsd-3-clause
26,078
[ "Gaussian" ]
1f105813e4ddf5a1fd94e85738cc20c65fe39896b721006185791c0a2b491d6c
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
"""Module with utility classes and functions related to data tables and text."""
import sys
from psi4 import core
from psi4.driver import constants
from .exceptions import *


class Table(object):
    """Class defining a flexible Table object for storing data."""

    def __init__(self, rows=(),
                 row_label_width=10,
                 row_label_precision=4,
                 cols=(),
                 width=16, precision=10):
        self.row_label_width = row_label_width
        self.row_label_precision = row_label_precision
        self.width = width
        self.precision = precision
        self.rows = rows

        if isinstance(cols, str):
            self.cols = (cols,)
        else:
            self.cols = cols

        self.labels = []
        self.data = []

    def format_label(self):
        """Function to pad the width of Table object labels."""
        #fmt = lambda x: (('%%%d.%df' % (self.row_label_width, self.row_label_precision)) % x)
        fmt = lambda x: (('%%%ds' % (self.row_label_width)) % x)
        return " ".join(map(fmt, self.labels))

    def format_values(self, values):
        """Function to pad the width of Table object data cells."""
        fmt = lambda x: (('%%%d.%df' % (self.width, self.precision)) % x)
        return " ".join(map(fmt, values))

    def __getitem__(self, value):
        self.labels.append(value)
        return self

    def __setitem__(self, name, value):
        self.labels.append(name)
        label = self.format_label()
        self.labels = []

        if isinstance(value, list):
            self.data.append((label, value))
        else:
            self.data.append((label, [value]))

    def save(self, file):
        """Function to save string of the Table object to *file*."""
        fileobj = open(file, "w")
        fileobj.write(str(self))
        fileobj.close()

    def __str__(self):
        rowstr = lambda x: '%%%ds' % self.row_label_width % x
        colstr = lambda x: '%%%ds' % self.width % x

        lines = []

        table_header = ""
        if isinstance(self.rows, str):
            table_header += "%%%ds" % self.row_label_width % self.rows
        else:
            table_header += " ".join(map(rowstr, self.rows))
        table_header += " ".join(map(colstr, self.cols))
        lines.append(table_header)

        for datarow in self.data:
            #print datarow
            row_data = datarow[0]
            row_data += self.format_values(datarow[1])
            lines.append(row_data)

        return "\n".join(lines) + "\n"

    def copy(self):
        """Function to return a copy of the Table object."""
        import copy
        return copy.deepcopy(self)

    def absolute_to_relative(self, Factor=constants.hartree2kcalmol):
        """Function to shift the data of each column of the Table object
        such that the lowest value is zero. A scaling factor of *Factor* is applied.
""" import copy if len(self.data) == 0: return current_min = list(copy.deepcopy(self.data[0][1])) for datarow in self.data: for col in range(0, len(datarow[1])): if current_min[col] > datarow[1][col]: current_min[col] = datarow[1][col] for datarow in self.data: for col in range(0, len(datarow[1])): #print datarow[1][col] datarow[1][col] = (datarow[1][col] - current_min[col]) * Factor def scale(self, Factor=constants.hartree2kcalmol): """Function to apply a scaling factor *Factor* to the data of the Table object. """ if len(self.data) == 0: return for datarow in self.data: for col in range(0, len(datarow[1])): #print datarow[1][col] datarow[1][col] = datarow[1][col] * Factor def banner(text, type=1, width=35, strNotOutfile=False): """Function to print *text* to output file in a banner of minimum width *width* and minimum three-line height for *type* = 1 or one-line height for *type* = 2. If *strNotOutfile* is True, function returns string rather than printing it to output file. """ lines = text.split('\n') max_length = 0 for line in lines: if (len(line) > max_length): max_length = len(line) max_length = max([width, max_length]) null = '' if type == 1: banner = ' //' + null.center(max_length, '>') + '//\n' for line in lines: banner += ' //' + line.center(max_length) + '//\n' banner += ' //' + null.center(max_length, '<') + '//\n' if type == 2: banner = '' for line in lines: banner += (' ' + line + ' ').center(max_length, '=') if strNotOutfile: return banner else: core.print_out(banner) def print_stdout(stuff): """Function to print *stuff* to standard output stream.""" print(stuff, file=sys.stdout) def print_stderr(stuff): """Function to print *stuff* to standard error stream.""" print(stuff, file=sys.stderr) def levenshtein(seq1, seq2): """Function to compute the Levenshtein distance between two strings.""" oneago = None thisrow = list(range(1, len(seq2) + 1)) + [0] for x in range(len(seq1)): twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1] for y in range(len(seq2)): delcost = oneago[y] + 1 addcost = thisrow[y - 1] + 1 subcost = oneago[y - 1] + (seq1[x] != seq2[y]) thisrow[y] = min(delcost, addcost, subcost) return thisrow[len(seq2) - 1] def find_approximate_string_matches(seq1,options,max_distance): """Function to compute approximate string matches from a list of options.""" matches = [] for seq2 in options: distance = levenshtein(seq1,seq2) if distance <= max_distance: matches.append(seq2) return matches
amjames/psi4
psi4/driver/p4util/text.py
Python
lgpl-3.0
6,983
[ "Psi4" ]
f1baed4f83da0abffda0137331a8a0b4bcc523ace1d3502ef995b13da37d9bfd
#!/usr/bin/env python import yaml import rospy import roslib from std_srvs.srv import Empty from activity_exploration.srv import ChangeMethodSrv from activity_exploration.srv import ChangeMethodSrvResponse from activity_exploration.budget_control import BudgetControl from strands_executive_msgs.msg import Task from strands_executive_msgs import task_utils from strands_navigation_msgs.msg import TopologicalMap from region_observation.util import is_intersected from region_observation.util import robot_view_cone, get_soma_info class ActivityRecommender(object): def __init__(self): rospy.loginfo("Initiating activity exploration...") self.soma_config = rospy.get_param( "~soma_config", "activity_exploration" ) self.exploration_method = rospy.get_param("~exploration_method", "ubc") self._exp_req_dur = rospy.Duration( rospy.get_param("~exploration_update_interval", 600) ) self.exploration_duration = rospy.Duration( rospy.get_param("~exploration_duration", "600") ) observe_interval = rospy.Duration(self.exploration_duration.secs*3) self.budget_control = BudgetControl(observe_interval=observe_interval) # all services to counters people_srv_name = rospy.get_param( "~people_srv", "/people_counter/people_best_time_estimate" ) if people_srv_name != "": people_srv_name = "/" + people_srv_name.split("/")[1] + "/restart" rospy.loginfo("Connecting to %s service..." % people_srv_name) self._people_srv = rospy.ServiceProxy(people_srv_name, Empty) self._people_srv.wait_for_service() act_srv_name = rospy.get_param( "~activity_srv", "/activity_counter/activity_best_time_estimate" ) if act_srv_name != "": act_srv_name = "/" + act_srv_name.split("/")[1] + "/restart" rospy.loginfo("Connecting to %s service..." % act_srv_name) self._act_srv = rospy.ServiceProxy(act_srv_name, Empty) self._act_srv.wait_for_service() scene_srv_name = rospy.get_param( "~scene_srv", "/scene_counter/scene_best_time_estimate" ) if scene_srv_name != "": scene_srv_name = "/" + scene_srv_name.split("/")[1] + "/restart" rospy.loginfo("Connecting to %s service..." % scene_srv_name) self._scene_srv = rospy.ServiceProxy(scene_srv_name, Empty) self._scene_srv.wait_for_service() # regions self.epsilon = 0.15 self.topo_map = None if rospy.get_param("~with_config_file", False): self.region_wps = self._get_waypoints_from_file() else: self.region_wps = self._get_waypoints(self.soma_config) rospy.loginfo( "Region ids and their nearest waypoints: %s" % str(self.region_wps) ) rospy.sleep(0.1) rospy.Service( '%s/change_method_srv' % rospy.get_name(), ChangeMethodSrv, self._change_srv_cb ) # self.request_exploration(None) rospy.Timer(self._exp_req_dur, self.request_exploration) def _change_srv_cb(self, msg): rospy.loginfo("An exploration method change is requested") rospy.loginfo("Changing to %s method..." 
                      % msg.exploration_method)
        self.exploration_method = msg.exploration_method
        rospy.loginfo("Restarting all counting processes...")
        self._act_srv()
        self._scene_srv()
        self._people_srv()
        return ChangeMethodSrvResponse()

    def request_exploration(self, event):
        self.budget_control.get_budget_alloc(self.region_wps.keys())
        for (start, roi, budget) in self.budget_control.budget_alloc:
            wp = self.region_wps[roi]
            start_time = start - self.exploration_duration
            end_time = start_time + self.exploration_duration + self.exploration_duration
            duration = self.exploration_duration
            task = Task(
                action="record_skeletons", start_node_id=wp, end_node_id=wp,
                start_after=start_time, end_before=end_time,
                max_duration=duration
            )
            task_utils.add_duration_argument(task, duration)
            task_utils.add_string_argument(task, roi)
            task_utils.add_string_argument(task, self.soma_config)
            rospy.loginfo(
                "Task to be requested: {wp:%s, roi:%s, start:%d, duration:%d, budget:%d}" % (
                    wp, roi, start_time.secs, duration.secs, int(budget)
                )
            )
            self.budget_control.bidder.add_task_bid(task, int(budget))
        rospy.loginfo("Finish adding tasks...")

    # def _check_visit_plan(self, start_time, end_time, visit_plan):
    #     scales = self.people_srv(start_time, end_time, False, True)
    #     scale_plan = list()
    #     for ind, scale in enumerate(scales.estimates):
    #         scale_plan.append((scale, scales.region_ids[ind]))
    #     if len(scale_plan) != 0:
    #         scale_plan = sorted(scale_plan, key=lambda i: i[0], reverse=True)
    #         lower_threshold = scale_plan[0][0] - (self.epsilon * scale_plan[0][0])
    #         high_visit = list()
    #         for total_scale, roi in scale_plan:
    #             if total_scale <= scale_plan[0][0] and total_scale >= lower_threshold:
    #                 high_visit.append(roi)
    #         p = len(high_visit) / float(len(scales.estimates))
    #         scale_plan = sorted(scale_plan, key=lambda i: i[0])
    #         if random.random() > p:
    #             rospy.loginfo("Changing WayPoints to visit unobserved places...")
    #             new_visit_plan = list()
    #             for i in scale_plan:
    #                 for j in visit_plan:
    #                     if i[1] == j[1]:
    #                         new_visit_plan.append(j)
    #                         break
    #             visit_plan = new_visit_plan
    #     return visit_plan

    def _topo_map_cb(self, topo_map):
        self.topo_map = topo_map

    def _get_waypoints(self, soma_config):
        region_wps = dict()
        # get regions information
        regions, _ = get_soma_info(soma_config)
        # get waypoint information
        topo_sub = rospy.Subscriber(
            "/topological_map", TopologicalMap, self._topo_map_cb, None, 10
        )
        rospy.loginfo("Getting information from /topological_map...")
        while self.topo_map is None:
            rospy.sleep(0.1)
        topo_sub.unregister()

        for wp in self.topo_map.nodes:
            wp_sight, _ = robot_view_cone(wp.pose)
            intersected_rois = list()
            intersected_regions = list()
            for roi, region in regions.iteritems():
                if is_intersected(wp_sight, region):
                    intersected_regions.append(region)
                    intersected_rois.append(roi)
            for ind, region in enumerate(intersected_regions):
                area = wp_sight.intersection(region).area
                roi = intersected_rois[ind]
                if roi in region_wps:
                    _, area1 = region_wps[roi]
                    if area > area1:
                        region_wps.update({roi: (wp.name, area)})
                else:
                    region_wps.update({roi: (wp.name, area)})
        return {
            roi: tupleoftwo[0] for roi, tupleoftwo in region_wps.iteritems()
        }

    def _get_waypoints_from_file(self):
        roi_wp_hashmap = yaml.load(
            open(
                roslib.packages.get_pkg_dir('activity_exploration')
                + '/config/region_to_wp.yaml',
                'r'
            )
        )
        return roi_wp_hashmap


if __name__ == '__main__':
    rospy.init_node("activity_exploration")
    ar = ActivityRecommender()
    rospy.spin()
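
# --- Illustrative sketch (added; not part of the original node). The
# waypoint-selection rule in _get_waypoints keeps, for each region, the
# waypoint whose view cone overlaps that region with the largest area. The
# same rule in isolation, with shapely polygons and made-up names (left
# commented out so it cannot interfere with the ROS node):
#
# from shapely.geometry import Polygon
#
# def pick_waypoints(view_cones, regions):
#     """view_cones: {wp_name: Polygon}; regions: {roi: Polygon}."""
#     best = {}  # roi -> (wp_name, overlap_area)
#     for wp_name, cone in view_cones.items():
#         for roi, region in regions.items():
#             if not cone.intersects(region):
#                 continue
#             area = cone.intersection(region).area
#             if roi not in best or area > best[roi][1]:
#                 best[roi] = (wp_name, area)
#     return {roi: wp for roi, (wp, _) in best.items()}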
strands-project/strands_exploration
activity_exploration/src/activity_exploration/exploration.py
Python
mit
7,914
[ "VisIt" ]
8f7125c713d05e9fa1653075a6b7294c30d646fe1e226f86a3c8d7d315ac22c6
#
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import os
import Pyro5.api
from . import apierror
from . import mupifobject
from .dataid import DataID
from . import property
from . import field
from . import function
from . import timestep
from . import pyroutil
from . import pyrofile
from typing import Optional, Any
import time
from pydantic.dataclasses import dataclass

import logging
log = logging.getLogger()

prefix = "mupif."
type_ids = []
type_ids.extend(prefix+s for s in list(map(str, DataID)))

# Schema for Model metadata; also passed on to Workflow
ModelSchema = {
    "type": "object",  # Object supplies a dictionary
    "properties": {
        # Name: e.g. Non-stationary thermal problem, obtained automatically from getApplicationSignature()
        # Name of the model (or workflow), e.g. "stationary thermal model", "steel buckling workflow"
        "Name": {"type": "string"},
        # ID: Unique ID of model (workflow), e.g. "Lammps", "CalculiX", "MFEM", "Buckling workflow 1"
        "ID": {"type": ["string", "integer"]},
        "Description": {"type": "string"},
        "Version_date": {"type": "string"},
        "Material": {"type": "string"},  # What material is simulated
        "Manuf_process": {"type": "string"},  # Manufacturing process or in-service conditions
        "Geometry": {"type": "string"},  # e.g. nanometers, 3D periodic box
        "Physics": {  # Corresponds to MODA Generic Physics
            "type": "object",
            "properties": {
                # Type: MODA model type
                "Type": {"type": "string", "enum": ["Electronic", "Atomistic", "Molecular", "Mesoscopic", "Continuum", "Other"]},
                "Entity": {"type": "string", "enum": ["Atom", "Electron", "Grains", "Finite volume", "Other"]},
                # Entity_description: E.g. Atoms are treated as spherical entities in space with the radius and mass
                # determined by the element type
                "Entity_description": {"type": "string"},
                # Equation: List of equations' description such as Equation of motion, heat balance, mass conservation.
                # MODA PHYSICS EQUATIONS
                "Equation": {"type": "array"},
                # Equation_quantities: e.g. Force, mass, potential, energy, stress, heat, temperature.
                "Equation_quantities": {"type": "array"},
                # Relation_description: Describes equilibrium of forces on an infinitesimal element, etc.
                "Relation_description": {"type": "array"},
                # Relation_formulation: Constitutive equation (material relation), e.g. force field, stress-strain,
                # flow-gradient. MODA MATERIAL RELATIONS
                "Relation_formulation": {"type": "array"}
            },
            "required": ["Type", "Entity"]
        },
        "Solver": {
            "properties": {
                # Software: Name of the software (e.g. openFOAM).
Corresponds to MODA SOFTWARE TOOL "Software": {"type": "string"}, "Language": {"type": "string"}, "License": {"type": "string"}, "Creator": {"type": "string"}, "Version_date": {"type": "string"}, # Type: Type e.g. finite difference method for Ordinary Differential Equations (ODEs) # Corresponds to MODA Solver Specification NUMERICAL SOLVER attribute. "Type": {"type": "string"}, # Solver_additional_params: Additional parameters of numerical solver, e.g. time integration scheme "Solver_additional_params": {"type": "string"}, "Documentation": {"type": "string"}, # Where published/documented "Estim_time_step_s": {"type": "number"}, # Seconds "Estim_comp_time_s": {"type": "number"}, # Seconds "Estim_execution_cost_EUR": {"type": "number"}, # EUR "Estim_personnel_cost_EUR": {"type": "number"}, # EUR "Required_expertise": {"type": "string", "enum": ["None", "User", "Expert"]}, "Accuracy": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]}, "Sensitivity": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]}, "Complexity": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]}, "Robustness": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]} }, "required": [ "Software", "Language", "License", "Creator", "Version_date", "Type", "Documentation", "Estim_time_step_s", "Estim_comp_time_s", "Estim_execution_cost_EUR", "Estim_personnel_cost_EUR", "Required_expertise", "Accuracy", "Sensitivity", "Complexity", "Robustness" ] }, "Execution": { "properties": { "ID": {"type": ["string", "integer"]}, # Optional application execution ID (typically set by workflow) # Use_case_ID: user case ID (e.g. thermo-mechanical simulation coded as 1_1) "Use_case_ID": {"type": ["string", "integer"]}, # Task_ID: user task ID (e.g. variant of user case ID such as model with higher accuracy) "Task_ID": {"type": "string"}, "Status": {"type": "string", "enum": ["Instantiated", "Initialized", "Running", "Finished", "Failed"]}, "Progress": {"type": "number"}, # Progress in % "Date_time_start": {"type": "string"}, # automatically set in Workflow "Date_time_end": {"type": "string"}, # automatically set in Workflow "Username": {"type": "string"}, # automatically set in Model and Workflow "Hostname": {"type": "string"} # automatically set in Model and Workflow }, "required": ["ID"] }, "Inputs": { "type": "array", # List "items": { "type": "object", # Object supplies a dictionary "properties": { "Type": {"type": "string", "enum": ["mupif.Property", "mupif.Field", "mupif.ParticleSet", "mupif.GrainState", "mupif.PyroFile"]}, "Type_ID": {"type": "string", "enum": type_ids}, # e.g. 
PID_Concentration
                    "Obj_ID": {  # optional parameter for additional info, string or list of string
                        # note: the JSON Schema keyword is spelled "anyOf"
                        "anyOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}]
                    },
                    "Name": {"type": "string"},
                    "ValueType": {"type": "string", "enum": ["Scalar", "Vector", "Tensor", "ScalarArray", "VectorArray", "TensorArray"]},
                    "Description": {"type": "string"},
                    "Units": {"type": "string"},
                    "Required": {"type": "boolean"},
                    "Set_at": {"type": "string", "enum": ["initialization", "timestep"]}
                },
                "required": ["Type", "Type_ID", "Name", "Units", "Required", "Set_at"],
                "allOf": [
                    {
                        "anyOf": [
                            {
                                "not": {
                                    "properties": {
                                        "Type": {"const": "mupif.Property"}
                                    }
                                }
                            },
                            {"required": ["ValueType"]}
                        ]
                    }
                ]
            }
        },
        "Outputs": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "Type": {"type": "string", "enum": ["mupif.Property", "mupif.Field", "mupif.ParticleSet", "mupif.GrainState"]},
                    "Type_ID": {"type": "string", "enum": type_ids},  # e.g. mupif.DataID.FID_Temperature
                    "Obj_ID": {  # optional parameter for additional info, string or list of string
                        "anyOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}]
                    },
                    "Name": {"type": "string"},
                    "ValueType": {"type": "string", "enum": ["Scalar", "Vector", "Tensor", "ScalarArray", "VectorArray", "TensorArray"]},
                    "Description": {"type": "string"},
                    "Units": {"type": "string"}
                },
                "required": ["Type", "Type_ID", "Name", "Units"],
                "allOf": [
                    {
                        "anyOf": [
                            {
                                "not": {
                                    "properties": {
                                        "Type": {"const": "mupif.Property"}
                                    }
                                }
                            },
                            {"required": ["ValueType"]}
                        ]
                    }
                ]
            }
        }
    },
    "required": [
        "Name", "ID", "Description", "Physics", "Solver", "Execution", "Inputs", "Outputs"
    ]
}


@Pyro5.api.expose
class Model(mupifobject.MupifObject):
    """
    An abstract class representing an application and its interface (API).

    The purpose of this class is to define abstract services for data exchange and steering.
    This interface has to be implemented/provided by any application.
    The data exchange is performed by the means of new data types introduced in the framework,
    namely properties and fields.
    New abstract data types (properties, fields) allow to hide all implementation details
    related to discretization and data storage.

    .. automethod:: __init__
    """

    pyroDaemon: Optional[Any] = None
    externalDaemon: bool = False
    pyroNS: Optional[str] = None
    pyroURI: Optional[str] = None
    appName: str = None
    workDir: str = ''
    _jobID: str = None

    def __init__(self, *, metadata={}, **kw):
        (username, hostname) = pyroutil.getUserInfo()
        defaults = dict([
            ('Username', username),
            ('Hostname', hostname),
            ('Status', 'Initialized'),
            ('Date_time_start', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())),
            ('Execution', {}),
            ('Solver', {})
        ])
        # use defaults for metadata, unless given explicitly
        for k, v in defaults.items():
            if k not in metadata:
                metadata[k] = v
        super().__init__(metadata=metadata, **kw)

    def initialize(self, workdir='', metadata={}, validateMetaData=True, **kwargs):
        """
        Initializes application, i.e. all functions after constructor and before run.

        :param str workdir: Optional parameter for working directory
        :param dict metadata: Optional dictionary used to set up metadata (can also be set by setMetadata()).
        :param bool validateMetaData: Defines if the metadata validation will be called
        :param named_arguments kwargs: Arbitrary further parameters
        """
        self.updateMetadata(metadata)

        self.setMetadata('Name', self.getApplicationSignature())
        self.setMetadata('Status', 'Initialized')

        if workdir == '':
            self.workDir = os.getcwd()
        else:
            self.workDir = workdir

        if validateMetaData:
            self.validateMetadata(ModelSchema)
            # log.info('Metadata successfully validated')

    def registerPyro(self, pyroDaemon, pyroNS, pyroURI, appName=None, externalDaemon=False):
        """
        Register the Pyro daemon and nameserver. Required by several services.

        :param Pyro5.api.Daemon pyroDaemon: Optional pyro daemon
        :param Pyro5.naming.Nameserver pyroNS: Optional nameserver
        :param string pyroURI: Optional URI of receiver
        :param string appName: Optional application name. Used for removing from pyroNS
        :param bool externalDaemon: Optional parameter when daemon was allocated externally.
        """
        self.pyroDaemon = pyroDaemon
        self.pyroNS = pyroNS
        self.pyroURI = pyroURI
        self.appName = appName
        self.externalDaemon = externalDaemon

    def get(self, objectTypeID, time=None, objectID=""):
        """
        Returns the requested object at given time. Object is identified by id.

        :param DataID objectTypeID: Identifier of the object
        :param Physics.PhysicalQuantity time: Target time
        :param int objectID: Identifies object with objectID (optional, default "")

        :return: Returns requested object.
        """

    def set(self, obj, objectID=""):
        """
        Registers the given (remote) object in application.

        :param property.Property or field.Field or function.Function or pyrofile.PyroFile or heavydata.HeavyDataHandle obj: Remote object to be registered by the application
        :param int or str objectID: Identifies object with objectID (optional, default "")
        """

    def getFieldURI(self, fieldID, time, objectID=""):
        """
        Returns the uri of requested field at given time. Field is identified by fieldID.

        :param DataID fieldID: Identifier of the field
        :param Physics.PhysicalQuantity time: Target time
        :param int objectID: Identifies field with objectID (optional, default "")

        :return: Requested field uri
        :rtype: Pyro5.api.URI
        """
        if self.pyroDaemon is None:
            raise apierror.APIError('Error: getFieldURI requires to register pyroDaemon in application')
        try:
            var_field = self.get(fieldID, time, objectID=objectID)
        except Exception:
            self.setMetadata('Status', 'Failed')
            raise apierror.APIError('Error: can not obtain field')
        if hasattr(var_field, '_PyroURI'):
            return var_field._PyroURI
        else:
            uri = self.pyroDaemon.register(var_field)
            # inject uri into var_field attributes, note: _PyroURI is avoided
            # for deepcopy operation
            var_field._PyroURI = uri
            # self.pyroNS.register("MUPIF."+self.pyroName+"."+str(fieldID), uri)
            return uri

    def solveStep(self, tstep, stageID=0, runInBackground=False):
        """
        Solves the problem for given time step.

        Proceeds the solution from actual state to given time.
        The actual state should not be updated at the end, as this method could be
        called multiple times for the same solution step until the global convergence
        is reached. When global convergence is reached, finishStep is called and then
        the actual state has to be updated.
        Solution can be split into individual stages identified by optional stageID parameter.
        In between the stages the additional data exchange can be performed.
        See also wait and isSolved services.

        :param timestep.TimeStep tstep: Solution step
        :param int stageID: optional argument identifying solution stage (default 0)
        :param bool runInBackground: optional argument, default False.
            If True, the solution will run in background (in separate thread or remotely).
        """
        self.setMetadata('Status', 'Running')
        self.setMetadata('Progress', 0.)

    def wait(self):
        """
        Wait until solve is completed when executed in background.
        """

    def isSolved(self):
        """
        Check whether solve has completed.

        :return: Returns true or false depending whether solve has completed when executed in background.
        :rtype: bool
        """

    def finishStep(self, tstep):
        """
        Called after a global convergence within a time step is achieved.

        :param timestep.TimeStep tstep: Solution step
        """

    def getCriticalTimeStep(self):
        """
        Returns a critical time step for an application.

        :return: Returns the actual (related to current state) critical time step increment
        :rtype: Physics.PhysicalQuantity
        """

    def getAssemblyTime(self, tstep):
        """
        Returns the assembly time related to given time step.
        The registered fields (inputs) should be evaluated in this time.

        :param timestep.TimeStep tstep: Solution step

        :return: Assembly time
        :rtype: Physics.PhysicalQuantity, timestep.TimeStep
        """

    def storeState(self, tstep):
        """
        Store the solution state of an application.

        :param timestep.TimeStep tstep: Solution step
        """

    def restoreState(self, tstep):
        """
        Restore the saved state of an application.

        :param timestep.TimeStep tstep: Solution step
        """

    def getAPIVersion(self):
        """
        :return: Returns the supported API version
        :rtype: str, int
        """

    def getApplicationSignature(self):
        """
        Get application signature.

        :return: Returns the application identification
        :rtype: str
        """
        return "Model"

    def removeApp(self, nameServer=None, appName=None):
        """
        Removes (unregisters) application from the name server.

        :param Pyro5.naming.Nameserver nameServer: Optional instance of a nameServer
        :param str appName: Optional name of the application to be removed
        """
        if nameServer is None:
            nameServer = self.pyroNS
        if appName is None:
            appName = self.appName

        if nameServer is not None:  # local application can run without a nameServer
            try:
                log.debug("Removing application %s from a nameServer %s" % (appName, nameServer))
                nameServer._pyroClaimOwnership()
                nameServer.remove(appName)
            except Exception:
                # log.warning("Cannot remove application %s from nameServer %s" % (appName, nameServer))
                log.exception(f"Cannot remove {appName} from {nameServer}?")
                # print("".join(Pyro5.errors.get_pyro_traceback()))
                self.setMetadata('Status', 'Failed')
                raise

    @Pyro5.api.oneway  # in case call returns much later than daemon.shutdown
    def terminate(self):
        """
        Terminates the application. Shuts down daemons if created internally.
""" self.setMetadata('Status', 'Finished') self.setMetadata('Date_time_end', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())) # Remove application from nameServer # print("Removing") if self.pyroNS is not None: self.removeApp() if self.pyroDaemon: self.pyroDaemon.unregister(self) log.info("Unregistering daemon %s" % self.pyroDaemon) # log.info(self.pyroDaemon) if not self.externalDaemon: self.pyroDaemon.shutdown() self.pyroDaemon = None else: log.info("Terminating model") def getURI(self): """ :return: Returns the application URI or None if application not registered in Pyro :rtype: str """ return self.pyroURI def printMetadata(self, nonEmpty=False): """ Print all metadata :param bool nonEmpty: Optionally print only non-empty values :return: None :rtype: None """ if self.hasMetadata('Name'): print('AppName:\'%s\':' % self.getMetadata('Name')) super().printMetadata(nonEmpty) def setJobID(self, jobid): self._jobID = jobid def getJobID(self): return self._jobID @Pyro5.api.expose class RemoteModel (object): """ Remote Application instances are normally represented by auto generated pyro proxy. However, when application is allocated using JobManager or ssh tunnel, the proper termination of the tunnel or job manager task is required. This class is a decorator around pyro proxy object represeting application storing the reference to job manager and related jobID or/and ssh tunnel. These extermal attributes could not be injected into Application instance, as it is remote instance (using proxy) and the termination of job and tunnel has to be done from local computer, which has the neccesary communication link established (ssh tunnel in particular, when port translation takes place) """ def __init__(self, decoratee, jobMan=None, jobID=None, appTunnel=None): self._decoratee = decoratee self._jobMan = jobMan self._jobID = jobID self._appTunnel = appTunnel def __getattr__(self, name): """ Catch all attribute access and pass it to self._decoratee, see python data model, __getattr__ method """ return getattr(self._decoratee, name) def getJobID(self): return self._jobID @Pyro5.api.oneway # in case call returns much later than daemon.shutdown def terminate(self): """ Terminates the application. Terminates the allocated job at jobManager """ if self._decoratee is not None: self._decoratee.terminate() self._decoratee = None if self._jobMan and self._jobID: try: log.info("RemoteApplication: Terminating jobManager job %s on %s" % ( str(self._jobID), self._jobMan.getNSName())) self._jobMan.terminateJob(self._jobID) self._jobID = None except Exception as e: print(e) self.setMetadata('Status', 'Failed') finally: self._jobMan.terminateJob(self._jobID) self._jobID = None # close tunnel as the last step so an application is still reachable if self._appTunnel: # log.info ("RemoteApplication: Terminating sshTunnel of application") if self._appTunnel != "manual": self._appTunnel.terminate() def __del__(self): """ Destructor, calls terminate if not done before. """ self.terminate()
mupif/mupif
mupif/model.py
Python
lgpl-3.0
23,354
[ "LAMMPS" ]
8313e6b219a172b2fe8178da2aadcd50ca7734ca4ada9a20e1b8f9b8dc0bd483
#!/usr/bin/python # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at: http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distrib- # uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. See the License for # specific language governing permissions and limitations under the License. """Tests for card.py.""" import datetime import json import card import config import kmlify import model import test_utils import utils from google.appengine.api import urlfetch from google.appengine.ext import ndb # just for GeoPt KML_DATA = '''<?xml version="1.0" encoding="UTF-8" ?> <kml xmlns="http://earth.google.com/kml/2.2"> <Document> <name>Two cities</name> <Placemark> <name>Helsinki</name> <description>description1</description> <Point><coordinates>25,60</coordinates></Point> </Placemark> <Placemark> <Point><coordinates>-83,40,1</coordinates></Point> <description>&#x64;escription&lt;2&gt;two</description> <name>Columbus</name> </Placemark> </Document> </kml> ''' GEORSS_DATA = ''' <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://purl.org/rss/1.0/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:georss="http://www.georss.org/georss"> <channel xmlns="http://schemas.google.com/georss" xmlns:georss="http://schemas.google.com/georss"> <item> <title>Helsinki</title> <summary>description1</summary> <georss:point>60 25</georss:point> </item> <item> <georss:point> 40 -83 1 </georss:point> <summary>&#x64;escription&lt;2&gt;two</summary> <title>Columbus</title> </item> </channel> </rdf:RDF> ''' ATOM_DATA = ''' <feed xmlns="http://www.w3.org/2005/Atom" xmlns:georss="http://www.georss.org/georss"> <title>Two cities</title> <entry> <title>Helsinki</title> <summary>description1</summary> <georss:point>60 25</georss:point> </entry> <entry> <georss:point> 40 -83 1 </georss:point> <content>&#x64;escription&lt;2&gt;two</content> <title>Columbus</title> </entry> </feed> ''' GOOGLE_PLACES_SEARCH_JSON = { 'status': 'OK', 'html_attributions': [], 'results': [{ 'place_id': 'placeId1', 'vicinity': 'description1', 'geometry': { 'location': { 'lat': 60, 'lng': 25 } }, 'name': 'Helsinki' }, { 'place_id': 'placeId2', 'vicinity': 'description<2>two', 'geometry': { 'location': { 'lat': 40, 'lng': -83 } }, 'name': 'Columbus' }] } GOOGLE_PLACES_SEARCH_JSON_STR = json.dumps(GOOGLE_PLACES_SEARCH_JSON) PLACES_FEATURES = [ card.Feature('Helsinki', None, ndb.GeoPt(60, 25), 'layer4', gplace_id='placeId1', layer_type='GOOGLE_PLACES'), card.Feature('Columbus', None, ndb.GeoPt(40, -83), 'layer4', gplace_id='placeId2', layer_type='GOOGLE_PLACES') ] FEATURE_FIELDS = [ ('Helsinki', 'description1', ndb.GeoPt(60, 25)), ('Columbus', 'description<2>two', ndb.GeoPt(40, -83)) ] ROOT_URL = 'http://app.com/root' MAP_ROOT = { 'id': 'm1', 'topics': [{ 'id': 't1', 'title': 'Topic 1', 'layer_ids': ['layer1', 'layer3'], # select just some layers 'crowd_enabled': True, 'questions': [{ 'id': 'q1', 'title': 'Foo', 'type': 'CHOICE', 'choices': [ {'id': 'a1', 'color': '#0f0', 'label': 'Green'}, {'id': 'a2', 'color': '#f00', 'label': 'Red'} ] }, { 'id': 'q2', 'title': 'Qux', 'type': 'NUMBER' }] }, { 'id': 't2', 'title': 'Topic 2', 'layer_ids': ['layer2'], 'crowd_enabled': True, 'questions': [{ 'id': 'q1', 'title': 'Bar', 
'type': 'CHOICE', 'answers': [ {'id': 'a1', 'color': '#0f0'}, {'id': 'a2', 'color': '#f00'}] }] }, { 'id': 't3', 'title': 'Topic 3', 'layer_ids': ['layer4'], 'crowd_enabled': True, 'questions': [{ 'id': 'q1', 'title': 'Pharmacies with foo', 'type': 'CHOICE', 'answers': [ {'id': 'a1', 'color': '#0f0'}, {'id': 'a2', 'color': '#f00'}] }] }], 'layers': [{ 'id': 'layer1', 'type': 'KML', 'source': {'kml': {'url': 'http://example.com/one.kml'}} }, { 'id': 'layer2', 'type': 'KML', 'source': {'kml': {'url': 'http://example.com/two.kml'}} }, { 'id': 'layer3', 'type': 'KML', 'source': {'kml': {'url': 'http://example.com/three.kml'}} }, { 'id': 'layer4', 'type': 'GOOGLE_PLACES', 'source': {'google_places': {'types': 'pharmacy'}} }] } class CardTest(test_utils.BaseTest): """Tests for functions in card.py.""" def setUp(self): super(CardTest, self).setUp() self.request = test_utils.SetupRequest('/.card/foo') def testRoundGeoPt(self): self.assertEquals('1.2323,-4.5657', card.RoundGeoPt(ndb.GeoPt(1.232323, -4.565656))) def testFeature(self): f1 = card.Feature('1', 'one', ndb.GeoPt(1, 2)) f2 = card.Feature('2', 'two', ndb.GeoPt(3, 4)) f3 = card.Feature('3', 'three', ndb.GeoPt(5, 6)) f1.distance = 1000 f2.distance = 2000 f3.distance = 1500 self.assertEquals([f1, f3, f2], sorted([f1, f2, f3])) self.assertEquals(1.0, f1.distance_km) self.assertEquals(1000/1609.344, f1.distance_mi) def testEarthDistance(self): def Distance(lat1, lon1, lat2, lon2): return card.EarthDistance(ndb.GeoPt(lat1, lon1), ndb.GeoPt(lat2, lon2)) self.assertEquals(0, Distance(5, 5, 5, 5)) self.assertTrue(abs(Distance(0, 0, 90, 0) - 10018538) < 1) self.assertTrue(abs(Distance(0, 0, 0, 90) - 10018538) < 1) self.assertTrue(abs(Distance(45, 0, 45, 90) - 6679025) < 1) def testInvalidContent(self): self.assertEquals([], card.GetFeaturesFromXml('xyz')) def testGetFeaturesFromKml(self): feature_fields = [(f.name, f.description_html, f.location) for f in card.GetFeaturesFromXml(KML_DATA)] self.assertEquals(FEATURE_FIELDS, feature_fields) def testGetFeaturesFromKml_attrs(self): attr = '<a href="google.com">attrX</a>' layer = {'id': 'layerX'} def GetResultFeatures(features): return [(f.name, f.description_html, f.location, f.html_attrs) for f in features] # Check that layer attribution is added to each feature layer['attribution'] = attr self.assertEquals( [f + ([attr],) for f in FEATURE_FIELDS], GetResultFeatures(card.GetFeaturesFromXml(KML_DATA, layer))) # Check that features' attributions list is empty when layer 'attribution' # field is empty layer['attribution'] = '' self.assertEquals( [f + ([],) for f in FEATURE_FIELDS], GetResultFeatures(card.GetFeaturesFromXml(KML_DATA, layer))) def testGetFeaturesFromGeoRss(self): feature_fields = [(f.name, f.description_html, f.location) for f in card.GetFeaturesFromXml(GEORSS_DATA)] self.assertEquals(FEATURE_FIELDS, feature_fields) def testGetFeaturesFromAtom(self): feature_fields = [(f.name, f.description_html, f.location) for f in card.GetFeaturesFromXml(ATOM_DATA)] self.assertEquals(FEATURE_FIELDS, feature_fields) def testGetKmlUrl(self): self.assertEquals('http://example.com/foo.kml', card.GetKmlUrl(ROOT_URL, { 'type': 'KML', 'source': { 'kml': { 'url': 'http://example.com/foo.kml' } } })) self.assertEquals('http://example.com/foo.rss', card.GetKmlUrl(ROOT_URL, { 'type': 'GEORSS', 'source': { 'georss': { 'url': 'http://example.com/foo.rss' } } })) self.AssertEqualsUrlWithUnorderedParams(( 'http://app.com/root/.kmlify' '?url=http://example.com/data.csv' '&type=csv' '&loc=latitude,longitude' 
'&icon=http://example.com/icon.png' '&color=123456' '&hotspot=tl' '&name=title' '&desc=description' '&cond=a<3' '&cond=b>4' '&cond=c=5' ), card.GetKmlUrl(ROOT_URL, { 'type': 'CSV', 'source': { 'csv': { 'url': 'http://example.com/data.csv', 'latitude_field': 'latitude', 'longitude_field': 'longitude', 'icon_url_template': 'http://example.com/icon.png', 'color_template': '123456', 'hotspot_template': 'tl', 'title_template': 'title', 'description_template': 'description', 'condition0': 'a<3', 'condition1': 'b>4', 'condition2': 'c=5' } } })) self.AssertEqualsUrlWithUnorderedParams(( 'http://app.com/root/.kmlify' '?url=https://docs.google.com/spreadsheet/pub?key=xyz%26output=csv' '&type=csv' '&loc=location' '&icon=http://example.com/icon.png' '&color=123456' '&hotspot=tl' '&name=title' '&desc=description' '&cond=a<3' '&cond=b>4' '&cond=c=5' ), card.GetKmlUrl(ROOT_URL, { 'type': 'GOOGLE_SPREADSHEET', 'source': { 'google_spreadsheet': { 'url': 'https://docs.google.com/spreadsheet/ccc' '?key=xyz&foo=bar#gid=0', 'latitude_field': 'location', 'longitude_field': 'location', 'icon_url_template': 'http://example.com/icon.png', 'color_template': '123456', 'hotspot_template': 'tl', 'title_template': 'title', 'description_template': 'description', 'condition0': 'a<3', 'condition1': 'b>4', 'condition2': 'c=5' } } })) self.AssertEqualsUrlWithUnorderedParams(( 'http://app.com/root/.kmlify' '?url=http://example.com/geodata.json' '&type=geojson' '&name=title' '&desc=description' '&cond=a<3' '&cond=b>4' '&cond=c=5' ), card.GetKmlUrl(ROOT_URL, { 'type': 'GEOJSON', 'source': { 'geojson': { 'url': 'http://example.com/geodata.json', 'title_template': 'title', 'description_template': 'description', 'condition0': 'a<3', 'condition1': 'b>4', 'condition2': 'c=5' } } })) self.assertEquals( 'http://example.com/kml?mid=someRandomMid', card.GetKmlUrl(ROOT_URL, { 'type': 'GOOGLE_MAPS_ENGINE_LITE_OR_PRO', 'source': { 'kml': { 'url': 'http://example.com/viewer?mid=someRandomMid' } } })) def testGetFeaturesFromPlacesLayer(self): self.AssertGetFeaturesFromPlacesLayer(GOOGLE_PLACES_SEARCH_JSON_STR, PLACES_FEATURES) # Try the same request again and make sure the result comes from cache # (i.e. there are no calls to the urlfetch) self.mox.StubOutWithMock(urlfetch, 'fetch') self.mox.ReplayAll() self.assertEquals( PLACES_FEATURES, card.GetFeaturesFromPlacesLayer(MAP_ROOT.get('layers')[3], ndb.GeoPt(20, 50), 100000)) def testGetFeaturesFromPlacesLayer_WithBadResponseStatus(self): self.AssertGetFeaturesFromPlacesLayer( '{"status": "REQUEST_DENIED", "results": []}', []) def testGetFeaturesFromPlacesLayer_WithZeroResults(self): self.AssertGetFeaturesFromPlacesLayer( '{"status": "ZERO_RESULTS", "results": []}', []) def AssertGetFeaturesFromPlacesLayer(self, api_response_content, expected_results): """Verifies GetFeaturesFromPlacesLayer with given input and output. Prepares a mock for urlfetch to return given api_response_content on a call to the Places API. Verifies that GetJsonFromGooglePlacesApi returns expected_results given the urlfetch mock setup. Args: api_response_content: Content that urlfetch should return expected_results: an array of Places results that GetJsonFromGooglePlacesApi should return """ config.Set('google_api_server_key', 'someFakeApiKey') # Simulate a successful fetch from Places API by setting up a fake # for urlfetch url = ('https://maps.googleapis.com/maps/api/place/nearbysearch/json?' 
'location=20.0%2C50.0' '&rankby=prominence' '&radius=100000' '&types=pharmacy' '&key=someFakeApiKey') url_responses = {url: utils.Struct(content=api_response_content)} self.mox.stubs.Set( urlfetch, 'fetch', lambda url, **kwargs: url_responses[url]) # Get Features based on Google Places API results for the layer self.assertEquals( expected_results, card.GetFeaturesFromPlacesLayer(MAP_ROOT.get('layers')[3], ndb.GeoPt(20, 50), 100000)) self.mox.UnsetStubs() def testSetDetailsOnFilteredFeatures(self): config.Set('google_api_server_key', 'someFakeApiKey') # Simulate a successful fetch from Places API by setting up a fake urlfetch url_responses = {} helsinki_attrs = ['Listing by <a href="fakeurl1.com">FakeSite1</a>'] api_response_content = json.dumps({ 'status': 'OK', 'html_attributions': helsinki_attrs, 'result': { 'formatted_address': 'Street1', 'formatted_phone_number': '111-111-1111' } }) url = card.PLACES_API_DETAILS_URL + 'placeid=placeId1&key=someFakeApiKey' url_responses[url] = utils.Struct(content=api_response_content) columbus_attrs = ['Listing by <a href="fakeurl2.com">FakeSite2</a>'] api_response_content = json.dumps({ 'status': 'OK', 'html_attributions': columbus_attrs, 'result': { 'formatted_address': 'Street2', 'formatted_phone_number': '222-222-2222' } }) url = card.PLACES_API_DETAILS_URL + 'placeid=placeId2&key=someFakeApiKey' url_responses[url] = utils.Struct(content=api_response_content) self.mox.stubs.Set( urlfetch, 'fetch', lambda url, **kwargs: url_responses[url]) exp_features = [ ('Helsinki', '<div>Street1</div><div>111-111-1111</div>', helsinki_attrs), ('Columbus', '<div>Street2</div><div>222-222-2222</div>', columbus_attrs) ] features = PLACES_FEATURES[:] card.SetDetailsOnFilteredFeatures(features) self.assertEquals(exp_features, [(f.name, f.description_html, f.html_attrs) for f in features]) def testGetCardLevelAttributions(self): places_attr = 'Listing by <a href="google.com">Google</a>' f1 = card.Feature('1', '', None, layer_type='GOOGLE_PLACES') f2 = card.Feature('2', '', None, layer_type='GOOGLE_PLACES', html_attrs=[places_attr]) f3 = card.Feature('3', '', None, layer_type='GOOGLE_PLACES', html_attrs=[places_attr, 'Attr3']) f4 = card.Feature('4', '', None, layer_type='KML', html_attrs=['kmlAttr']) f5 = card.Feature('5', '', None, layer_type='KML') features = [f1, f2, f3, f4, f5] html_attrs = card.GetCardLevelAttributions(features) # Check that card level attributions only include those from Google Places self.assertEquals(2, len(html_attrs)) self.assertTrue(places_attr in html_attrs) self.assertTrue('Attr3' in html_attrs) # Verify that individual html attributions for Google Places features were # cleared self.assertEquals(None, f2.html_attrs) self.assertEquals(None, f3.html_attrs) # Verify that KML feature individual attribution is untouched self.assertEquals(1, len(f4.html_attrs)) def testGetFeatures(self): # Try getting features for a topic with two layers. self.SetForTest(kmlify, 'FetchData', lambda url, host: 'data from ' + url) self.SetForTest( card, 'GetFeaturesFromXml', lambda data, layer: ['parsed ' + data + ' for ' + layer.get('id')]) self.assertEquals( ['parsed data from http://example.com/one.kml for layer1', 'parsed data from http://example.com/three.kml for layer3'], card.GetFeatures(MAP_ROOT, 'm1', 't1', self.request, ndb.GeoPt(20, 50), 100000)) def testGetFeaturesWithFailedFetches(self): # Even if some fetches fail, we should get features from the others. 
    def FetchButSometimesFail(url, unused_host):
      if 'one.kml' in url:
        raise urlfetch.DownloadError
      return 'data from ' + url
    self.SetForTest(kmlify, 'FetchData', FetchButSometimesFail)
    self.SetForTest(card, 'GetFeaturesFromXml',
                    lambda data, layer: ['parsed ' + data])
    self.assertEquals(['parsed data from http://example.com/three.kml'],
                      card.GetFeatures(MAP_ROOT, 'm1', 't1', self.request,
                                       ndb.GeoPt(20, 50), 100000))

  def testGetFeaturesWithFailedParsing(self):
    # Even if some files don't parse, we should get features from the others.
    def ParseButSometimesFail(data, layer):
      if not layer:
        return
      if 'three.kml' in data:
        raise SyntaxError
      return ['parsed ' + data]
    self.SetForTest(kmlify, 'FetchData', lambda url, host: 'data from ' + url)
    self.SetForTest(card, 'GetFeaturesFromXml', ParseButSometimesFail)
    self.assertEquals(['parsed data from http://example.com/one.kml'],
                      card.GetFeatures(MAP_ROOT, 'm1', 't1', self.request,
                                       ndb.GeoPt(20, 50), 100000))

  def testGetFeaturesWithInvalidTopicId(self):
    # GetFeatures should accept a nonexistent topic without raising exceptions.
    self.assertEquals([], card.GetFeatures(MAP_ROOT, 'm1', 'xyz', self.request,
                                           ndb.GeoPt(20, 50), 100000))

  def testGetAnswersAndReports(self):
    now = datetime.datetime.utcnow()
    seconds = lambda s: datetime.timedelta(seconds=s)
    now_minus_1, now_minus_2 = now - seconds(1), now - seconds(2)
    reports = [
        # Most recent report has answers for q1 and q2.
        model.CrowdReport(answers_json='{"m1.t1.q1": "a1", "m1.t1.q2": "a2"}',
                          id='r1', text='', effective=now),
        # Older answer to m1.t1.q2 should be superseded by the recent answer
        model.CrowdReport(answers_json='{"m1.t1.q2": "a3", "m1.t1.q3": "a3"}',
                          id='r2', text='hello', effective=now_minus_1),
        # Answers for irrelevant maps or topics should be ignored
        model.CrowdReport(answers_json='{"m1.t2.q4": "a4", "m2.t1.q5": "a5"}',
                          id='r3', text='goodbye', effective=now_minus_2)
    ]
    self.SetForTest(model.CrowdReport, 'GetByLocation',
                    staticmethod(lambda *args, **kwargs: reports))
    self.assertEquals(
        ({'q1': 'a1', 'q2': 'a2', 'q3': 'a3', '_text': 'hello'},
         {'q1': now, 'q2': now, 'q3': now_minus_1, '_text': now_minus_1},
         [{'_id': 'r1', '_effective': now,
           'q1': 'a1', 'q2': 'a2', '_text': ''},
          {'_id': 'r2', '_effective': now_minus_1,
           'q2': 'a3', 'q3': 'a3', '_text': 'hello'},
          {'_id': 'r3', '_effective': now_minus_2, '_text': 'goodbye'}]),
        card.GetAnswersAndReports('m1', 't1', 'location', 100))

  def testGetLegibleTextColor(self):
    # Black on a light background; white on a dark background
    self.assertEquals('#000', card.GetLegibleTextColor('#999'))
    self.assertEquals('#fff', card.GetLegibleTextColor('#777'))

    # Medium green is lighter than medium red.
self.assertEquals('#000', card.GetLegibleTextColor('#0f0')) self.assertEquals('#fff', card.GetLegibleTextColor('#ff0000')) def testSetAnswersAndReportsOnFeatures(self): features = [card.Feature('title1', 'description1', ndb.GeoPt(1, 1)), card.Feature('title2', 'description2', ndb.GeoPt(2, 2))] now = datetime.datetime.utcnow() def FakeGetAnswersAndReports(unused_1, unused_2, location, unused_3): if location.lat < 1.5: return ({'q1': 'a1', '_text': 'hello'}, {'q1': now, '_text': now}, [{'_id': 'r1', '_effective': now, 'q1': 'a1', '_text': 'hello'}]) else: return ({'q1': 'a2', 'q2': 3, '_text': 'goodbye'}, {'q1': now, 'q2': now, '_text': now}, [{'_id': 'r2', '_effective': now - datetime.timedelta(minutes=70), 'q1': 'a2', 'q2': 3, '_text': 'goodbye'}]) self.SetForTest(card, 'GetAnswersAndReports', FakeGetAnswersAndReports) card.SetAnswersAndReportsOnFeatures( features, MAP_ROOT, 't1', ['q1', 'q2', '_text']) self.assertEquals('Green.', features[0].answer_text) self.assertEquals('#0f0', features[0].status_color) self.assertEquals('Red. Qux: 3.', features[1].answer_text) self.assertEquals('#f00', features[1].status_color) self.assertEquals( [{'answer_summary': 'Green.', 'effective': 'just now', 'id': 'r1', 'text': 'hello', 'status_color': '#0f0', 'age_minutes': 0}], features[0].reports) self.assertEquals( [{'answer_summary': 'Red. Qux: 3.', 'effective': '70m ago', 'id': 'r2', 'text': 'goodbye', 'status_color': '#f00', 'age_minutes': 70}], features[1].reports) def testSetDistanceOnFeatures(self): features = [card.Feature('title1', 'description1', ndb.GeoPt(1, 1)), card.Feature('title2', 'description2', ndb.GeoPt(2, 2))] card.SetDistanceOnFeatures(features, ndb.GeoPt(1, 1)) self.assertEquals(0, features[0].distance) self.assertTrue(abs(features[1].distance - 157398) < 1) def testFilterFeatures(self): all_features = [card.Feature('name3', 'desc3', ndb.GeoPt(3, 3)), card.Feature('name2', 'desc2', ndb.GeoPt(2, 2)), card.Feature('name1', 'desc1', ndb.GeoPt(1, 1))] all_features[0].distance = 3 all_features[1].distance = 2 all_features[2].distance = 1 # Not limited; should give all three features features = all_features[:] card.FilterFeatures(features, 100, 100) self.assertEquals(['name1', 'name2', 'name3'], [f.name for f in features]) # Limit by radius features = all_features[:] card.FilterFeatures(features, 2.5, 100) self.assertEquals(['name1', 'name2'], [f.name for f in features]) # Limit by count features = all_features[:] card.FilterFeatures(features, 100, 1) self.assertEquals(['name1'], [f.name for f in features]) def testGetGeoJson(self): html_attrs = ['<a href="google.com">attr1</a>', 'attr2'] features = [card.Feature('title1', 'description1', ndb.GeoPt(20, -40), html_attrs=html_attrs), card.Feature('title2', 'description2', ndb.GeoPt(30, -50))] card.SetDistanceOnFeatures(features, ndb.GeoPt(20, -40)) geojson = card.GetGeoJson(features, include_descriptions=True) self.assertEquals('FeatureCollection', geojson['type']) self.assertEquals(2, len(geojson['features'])) self.assertEquals({'geometry': {'coordinates': [-40.0, 20.0], 'type': 'Point'}, 'properties': {'answer_text': '', 'answer_time': '', 'answer_source': '', 'answers': {}, 'reports': [], 'status_color': None, 'description_html': 'description1', 'html_attrs': html_attrs, 'distance': 0.0, 'distance_km': 0.0, 'distance_mi': 0.0, 'layer_id': None, 'name': 'title1'}, 'type': 'Feature'}, geojson['features'][0]) class CardHandlerTest(test_utils.BaseTest): """Tests for request handlers in card.py.""" def setUp(self): super(CardHandlerTest, 
self).setUp() map_object = test_utils.CreateMap(MAP_ROOT) self.map_id = map_object.id with test_utils.RootLogin(): model.CatalogEntry.Create('xyz.com', 'foo', map_object) def testGetCardByIdAndTopic(self): self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) with test_utils.RootLogin(): geojson = self._GetGeoJson('/.card/%s.t1' % self.map_id) self.assertEquals('Topic 1', geojson['properties']['topic']['title']) self.assertTrue(self._FeatureInResponse(geojson, 'Helsinki')) self.assertTrue(self._FeatureInResponse(geojson, 'Columbus')) def testGetCardByLabelAndTopic(self): self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) geojson = self._GetGeoJson('/xyz.com/.card/foo/t2') self.assertEquals('FeatureCollection', geojson['type']) self.assertEquals('Topic 2', geojson['properties']['topic']['title']) self.assertEquals(2, len(geojson['features'])) self.assertTrue(self._FeatureInResponse(geojson, 'Helsinki')) self.assertTrue(self._FeatureInResponse(geojson, 'Columbus')) # Verify there are no descriptions, since show_desc param isn't set # in the request self.assertEquals(None, geojson['features'][0]['properties']['description_html']) # Verify there are no descriptions with show_desc=0 param in the request geojson = self._GetGeoJson('/xyz.com/.card/foo/t2?show_desc=0') self.assertEquals(None, geojson['features'][0]['properties']['description_html']) def testGetCardByLabelAndTopicReports(self): now = datetime.datetime.utcnow() reports = [ # Most recent report has answers for q1 and q2. model.CrowdReport(answers_json='{"m1.t2.q1": "a1", "m1.t2.q2": "a2"}', id='r1', text='', effective=now) ] self.SetForTest(model.CrowdReport, 'GetByLocation', staticmethod(lambda *args, **kwargs: reports)) self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) # Verify there are reports with show_reports=1 param in the request geojson = self._GetGeoJson('/xyz.com/.card/foo/t2?qids=q1&show_reports=1') self.assertEquals(1, len(geojson['features'][0]['properties']['reports'])) # Verify there are no reports with show_reports missing from the request geojson = self._GetGeoJson('/xyz.com/.card/foo/t2?qids=q1') self.assertEquals(0, len(geojson['features'][0]['properties']['reports'])) # Verify there are no reports with show_reports=0 in the request geojson = self._GetGeoJson('/xyz.com/.card/foo/t2?qids=q1&show_reports=0') self.assertEquals(0, len(geojson['features'][0]['properties']['reports'])) def testGetCardByLabelAndTopicWithDescriptionsEnabled(self): self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) # Enable descriptions with show_desc=1 param in the request geojson = self._GetGeoJson('/xyz.com/.card/foo/t2?show_desc=1') self.assertEquals('Topic 2', geojson['properties']['topic']['title']) self.assertTrue(self._FeatureInResponse(geojson, 'Helsinki')) self.assertTrue(self._FeatureInResponse(geojson, 'Columbus')) # Verify descriptions show up (with all the html tags removed) self.assertEquals('description1', geojson['features'][0]['properties']['description_html']) self.assertEquals('description<2>two', geojson['features'][1]['properties']['description_html']) def testGetCardByLabelAndTopicWithDescriptionsXss(self): kml_data_with_xss = '''<?xml version="1.0" encoding="UTF-8" ?> <kml xmlns="http://earth.google.com/kml/2.2"> <Document> <name>Cities</name> <Placemark> <name>Paris</name> <description><![CDATA[<b>description1</b>-<div>addr</div><script>EvilScript</script>]]></description> <Point><coordinates>25,60</coordinates></Point> </Placemark> </Document> </kml> ''' 
self.SetForTest(kmlify, 'FetchData', lambda url, host: kml_data_with_xss) # Enable descriptions with show_desc=1 param in the request geojson = self._GetGeoJson('/xyz.com/.card/foo/t2?show_desc=1') self.assertTrue(self._FeatureInResponse(geojson, 'Paris')) # Verify <script> doesn't show up in the description, but <b> stays self.assertEquals('<b>description1</b>-<div>addr</div>EvilScript', geojson['features'][0]['properties']['description_html']) def testPostByLabelAndTopic(self): self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) response = self.DoPost('/xyz.com/.card/foo/t2', 'll=60,25&n=1&r=100') geojson = json.loads(response.body) self.assertEquals('Topic 2', geojson['properties']['topic']['title']) self.assertTrue(self._FeatureInResponse(geojson, 'Helsinki')) self.assertFalse(self._FeatureInResponse(geojson, 'Columbus')) def testGetCardByTopic(self): response = self.DoGet('/xyz.com/.card/foo') self.assertEquals('foo/t1', response.headers['Location']) def testFeatureDistanceUnits(self): self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) def AssertUnitsInResponseTo(expected_unit, url, country_header=None): headers = ({'X-AppEngine-Country': country_header} if country_header else {}) response = self.DoGet(url, headers=headers) geojson = json.loads(response.body) self.assertEquals(expected_unit, geojson['properties']['unit']) # Default: no units in the request, no auto-detected country AssertUnitsInResponseTo('km', '/xyz.com/.card/foo/t1') # Response uses units from the request AssertUnitsInResponseTo('mi', '/xyz.com/.card/foo/t1?unit=mi') # Response uses units from the request country AssertUnitsInResponseTo('km', '/xyz.com/.card/foo/t1', country_header='CA') AssertUnitsInResponseTo('mi', '/xyz.com/.card/foo/t1', country_header='US') # Response uses units from the request (ignoring any auto-determined # units based on request country) AssertUnitsInResponseTo('km', '/xyz.com/.card/foo/t1?unit=km', country_header='US') def testMapLink(self): self.SetForTest(kmlify, 'FetchData', lambda url, host: KML_DATA) def AssertMapLinkInResponseTo(expected_link, url): response = self.DoGet(url) geojson = json.loads(response.body) self.assertEquals(expected_link, geojson['properties']['map_url']) # Request has map id and topic id: map_url should be empty with test_utils.RootLogin(): AssertMapLinkInResponseTo(None, '/.card/%s.t1' % self.map_id) # Request has map label, topic id: map_url should only include layers of a # requested topic AssertMapLinkInResponseTo( test_utils.ROOT_URL + '/xyz.com/foo?layers=layer1,layer3' '&llbox=68.0,32.0,68.2,-126.2', '/xyz.com/.card/foo/t1') def _GetGeoJson(self, url): response = self.DoGet(url) return json.loads(response.body) def _FeatureInResponse(self, geojson, name): for f in geojson['features']: if f['properties']['name'] == name: return True return False if __name__ == '__main__': test_utils.main()
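The GetLegibleTextColor tests above pin down a black-or-white choice driven by
perceived luminance. A minimal sketch of such a helper (a hypothetical
re-implementation, not the project's actual card.GetLegibleTextColor) that
satisfies those four assertions:

def legible_text_color(background):
    """Return '#000' on light backgrounds and '#fff' on dark ones."""
    h = background.lstrip('#')
    if len(h) == 3:
        h = ''.join(c * 2 for c in h)  # expand '#999' to '#999999'
    r, g, b = (int(h[i:i + 2], 16) for i in (0, 2, 4))
    luminance = 0.299 * r + 0.587 * g + 0.114 * b  # ITU-R BT.601 weights
    return '#000' if luminance > 128 else '#fff'

With these weights #999 (153) and #0f0 (about 150) read as light, while #777
(119) and #ff0000 (about 76) read as dark, matching the expected colors above.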
googlearchive/googlecrisismap
card_test.py
Python
apache-2.0
32,603
[ "COLUMBUS" ]
29964654844ec7cd16f9f6e5e5aa22527c7909f08379b4a1974fe0c96a39f2ae
#!/usr/bin/env python3 '''Adds show_arrows command to pymol, which takes an xyz file''' import sys import os from pymol import cmd, cgo, CmdException from chempy import cpv def draw_arrow(xyz1,xyz2, radius=0.5, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''): ''' Draw an arrow; borrows heavily from cgi arrows. ''' radius, gap = float(radius), float(gap) hlength, hradius = float(hlength), float(hradius) xyz1 = list(xyz1) xyz2 = list(xyz2) try: color1, color2 = color.split() except: color1 = color2 = color color1 = list(cmd.get_color_tuple(color1)) color2 = list(cmd.get_color_tuple(color2)) normal = cpv.normalize(cpv.sub(xyz1, xyz2)) if hlength < 0: hlength = radius * 3.0 if hradius < 0: hradius = hlength * 0.6 if gap: diff = cpv.scale(normal, gap) xyz1 = cpv.sub(xyz1, diff) xyz2 = cpv.add(xyz2, diff) xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2) obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + \ [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + \ [1.0, 0.0] if not name: name = cmd.get_unused_name('arrow') cmd.load_cgo(obj, name) def make_pymol_arrows(base, atoms, scale, color, radius): arrow_objs = [] arrow_group = base + '_arrows' cmd.delete(arrow_group) #remove any pre-existing group for i, atom in enumerate(atoms): arrow_obj = base + '_arrow_' + str(i) arrow_objs.append(arrow_obj) elem, xi, yi, zi, dx, dy, dz = atom c = 1.725*radius xf = xi + -scale*dx + c yf = yi + -scale*dy + c zf = zi + -scale*dz + c draw_arrow((xi,yi,zi),(xf,yf,zf),radius=radius,color=color,name=arrow_obj) cmd.group(arrow_group,' '.join(arrow_objs)) def xyz_line_to_atom(xyz_line): fields = xyz_line.split() elem = fields[0] x = float(fields[1]) y = float(fields[2]) z = float(fields[3]) dx = float(fields[4]) dy = float(fields[5]) dz = float(fields[6]) return elem, x, y, z, dx, dy, dz def read_xyz_file(xyz_file): with open(xyz_file, 'r') as f: lines = f.readlines() n_atoms = int(lines[0]) atoms = [] for i in range(n_atoms): atom = xyz_line_to_atom(lines[2+i]) atoms.append(atom) return atoms def show_xyz_arrows(xyzfile, scale=2.0, color="white purple",radius=0.2): atoms = read_xyz_file(xyzfile) base_name = xyzfile.replace('.xyz', '') make_pymol_arrows(base_name, atoms, float(scale), color, float(radius)) cmd.extend('show_xyz_arrows', show_xyz_arrows)
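A short usage sketch grounded in the functions above (the file name and
argument values are illustrative; read_xyz_file expects per-atom lines of the
form "element x y z dx dy dz"):

# From within a PyMOL session:
#   run show_xyz_arrows.py
#   show_xyz_arrows forces.xyz, scale=2.0, color=white purple, radius=0.2
# The CGO arrows are grouped as "forces_arrows"; re-running the command
# deletes and redraws the group, or remove it manually with:
#   delete forces_arrows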
gnina/scripts
show_xyz_arrows.py
Python
bsd-3-clause
2,688
[ "ChemPy", "PyMOL" ]
7f89e8c8a6445468ddaf8e3a8505c5c3e4a6db0e973dda552d488d8dfd279d49
""" Utilities for creating and working with Zinc Fields. """ from opencmiss.utils.zinc.general import ChangeManager from opencmiss.zinc.element import Mesh from opencmiss.zinc.field import Field, FieldFiniteElement, FieldGroup, \ FieldNodeGroup, FieldStoredMeshLocation from opencmiss.zinc.fieldmodule import Fieldmodule from opencmiss.zinc.node import Nodeset from opencmiss.zinc.result import RESULT_OK def field_is_managed_coordinates(field_in: Field): """ Conditional function returning True if the field is Finite Element type, with coordinate type attribute, up to 3 components, and is managed. """ return (field_in is not None) and field_in.isManaged() and\ (field_in.getNumberOfComponents() <= 3) and\ field_in.castFiniteElement().isValid() and field_in.isTypeCoordinate() def field_is_managed_group(field_in: Field): """ Conditional function returning True if the field is a managed Group. """ return field_in.castGroup().isValid() and field_in.isManaged() def assign_field_parameters(target_field: Field, source_field: Field): """ Copy parameters from sourceField to targetField. Currently only works for node parameters. """ field_assignment = target_field.createFieldassignment(source_field) field_assignment.assign() def create_fields_displacement_gradients(coordinates: Field, reference_coordinates: Field, mesh: Mesh): """ :return: 1st and 2nd displacement gradients of (coordinates - referenceCoordinates) w.r.t. referenceCoordinates. """ assert (coordinates.getNumberOfComponents() == 3) and (reference_coordinates.getNumberOfComponents() == 3) fieldmodule = mesh.getFieldmodule() dimension = mesh.getDimension() with ChangeManager(fieldmodule): if dimension == 3: u = coordinates - reference_coordinates displacement_gradient = fieldmodule.createFieldGradient(u, reference_coordinates) displacement_gradient2 = fieldmodule.createFieldGradient(displacement_gradient, reference_coordinates) elif dimension == 2: # Note this needs improvement as missing cross terms # assume xi directions are approximately normal; # effect is to penalise elements where this is not so, which is also desired dX_dxi1 = fieldmodule.createFieldDerivative(reference_coordinates, 1) dX_dxi2 = fieldmodule.createFieldDerivative(reference_coordinates, 2) dx_dxi1 = fieldmodule.createFieldDerivative(coordinates, 1) dx_dxi2 = fieldmodule.createFieldDerivative(coordinates, 2) dS1_dxi1 = fieldmodule.createFieldMagnitude(dX_dxi1) dS2_dxi2 = fieldmodule.createFieldMagnitude(dX_dxi2) du_dS1 = (dx_dxi1 - dX_dxi1)/dS1_dxi1 du_dS2 = (dx_dxi2 - dX_dxi2)/dS2_dxi2 displacement_gradient = fieldmodule.createFieldConcatenate([du_dS1, du_dS2]) # curvature: d2u_dSdxi1 = fieldmodule.createFieldDerivative(displacement_gradient, 1) d2u_dSdxi2 = fieldmodule.createFieldDerivative(displacement_gradient, 2) displacement_gradient2 = fieldmodule.createFieldConcatenate([ d2u_dSdxi1/dS1_dxi1, d2u_dSdxi2/dS2_dxi2 ]) else: # dimension == 1 dX_dxi1 = fieldmodule.createFieldDerivative(reference_coordinates, 1) dx_dxi1 = fieldmodule.createFieldDerivative(coordinates, 1) dS1_dxi1 = fieldmodule.createFieldMagnitude(dX_dxi1) displacement_gradient = (dx_dxi1 - dX_dxi1)/dS1_dxi1 # curvature: displacement_gradient2 = fieldmodule.createFieldDerivative(displacement_gradient, 1)/dS1_dxi1 return displacement_gradient, displacement_gradient2 def create_field_euler_angles_rotation_matrix(fieldmodule: Fieldmodule, euler_angles: Field) -> Field: """ From OpenCMISS-Zinc graphics_library.cpp, matrix transposed to row major. 
Matrix is product RzRyRx, giving rotation about x, then y, then z with positive angles rotating by right hand rule about axis. :param fieldmodule: The fieldmodule to create the field in. :param euler_angles: 3-component field of angles in radians, components: 0 = azimuth (about z) 1 = elevation (about y) 2 = roll (about x) :return: 3x3 rotation matrix field suitable for pre-multiplying vector v i.e. v' = Mv """ assert euler_angles.getNumberOfComponents() == 3 with ChangeManager(fieldmodule): azimuth = fieldmodule.createFieldComponent(euler_angles, 1) cos_azimuth = fieldmodule.createFieldCos(azimuth) sin_azimuth = fieldmodule.createFieldSin(azimuth) elevation = fieldmodule.createFieldComponent(euler_angles, 2) cos_elevation = fieldmodule.createFieldCos(elevation) sin_elevation = fieldmodule.createFieldSin(elevation) roll = fieldmodule.createFieldComponent(euler_angles, 3) cos_roll = fieldmodule.createFieldCos(roll) sin_roll = fieldmodule.createFieldSin(roll) minus_one = fieldmodule.createFieldConstant([-1.0]) cos_azimuth_sin_elevation = cos_azimuth*sin_elevation sin_azimuth_sin_elevation = sin_azimuth*sin_elevation matrix_components = [ cos_azimuth*cos_elevation, cos_azimuth_sin_elevation*sin_roll - sin_azimuth*cos_roll, cos_azimuth_sin_elevation*cos_roll + sin_azimuth*sin_roll, sin_azimuth*cos_elevation, sin_azimuth_sin_elevation*sin_roll + cos_azimuth*cos_roll, sin_azimuth_sin_elevation*cos_roll - cos_azimuth*sin_roll, minus_one*sin_elevation, cos_elevation*sin_roll, cos_elevation*cos_roll] rotation_matrix = fieldmodule.createFieldConcatenate(matrix_components) return rotation_matrix def create_field_mesh_integral(coordinates: Field, mesh: Mesh, number_of_points=3): """ Create a field integrating the coordinates to give scalar volume/area/length over the mesh, depending on its dimension. :param coordinates: :param mesh: :param number_of_points: Number of Gauss points. :return: Field giving volume of coordinates field over mesh via Gaussian quadrature. """ fieldmodule = coordinates.getFieldmodule() with ChangeManager(fieldmodule): mesh_integral_field = fieldmodule.createFieldMeshIntegral(fieldmodule.createFieldConstant(1.0), coordinates, mesh) mesh_integral_field.setNumbersOfPoints(number_of_points) return mesh_integral_field def _create_plane_equation_formulation(fieldmodule, finite_element_field, plane_normal_field, point_on_plane_field): """ Create an iso-scalar field that is based on the plane equation. """ d = fieldmodule.createFieldDotProduct(plane_normal_field, point_on_plane_field) iso_scalar_field = fieldmodule.createFieldDotProduct(finite_element_field, plane_normal_field) - d return iso_scalar_field def create_field_image(fieldmodule, image_filename, name='image'): """ Create an image field using the given fieldmodule. The image filename must exist and be a known image type. :param fieldmodule: The fieldmodule to create the field in. :param image_filename: Image filename. :param name: Optional name of the image field, defaults to 'image'. :return: The image field created. """ image_field = fieldmodule.createFieldImage() image_field.setName(name) image_field.setFilterMode(image_field.FILTER_MODE_LINEAR) # Create a stream information object that we can use to read the # image file from disk stream_information = image_field.createStreaminformationImage() # We are reading in a file from the local disk so our resource is a file. stream_information.createStreamresourceFile(image_filename) # Actually read in the image file into the image field. 
image_field.read(stream_information) return image_field def create_fields_transformations(coordinates: Field, rotation_angles=None, scale_value=1.0, translation_offsets=None): """ Create constant fields for rotation, scale and translation containing the supplied values, plus the transformed coordinates applying them in the supplied order. :param coordinates: The coordinate field to scale, 3 components. :param rotation_angles: List of euler angles, length = number of components. See create_field_euler_angles_rotation_matrix. :param scale_value: Scalar to multiply all components of coordinates. :param translation_offsets: List of offsets, length = number of components. :return: 4 fields: transformedCoordinates, rotation, scale, translation """ if rotation_angles is None: rotation_angles = [0.0, 0.0, 0.0] if translation_offsets is None: translation_offsets = [0.0, 0.0, 0.0] components_count = coordinates.getNumberOfComponents() assert (components_count == 3) and (len(rotation_angles) == components_count) and isinstance(scale_value, float) \ and (len(translation_offsets) == components_count), "createTransformationFields. Invalid arguments" fieldmodule = coordinates.getFieldmodule() with ChangeManager(fieldmodule): # scale, translate and rotate model, in that order rotation = fieldmodule.createFieldConstant(rotation_angles) scale = fieldmodule.createFieldConstant(scale_value) translation = fieldmodule.createFieldConstant(translation_offsets) rotation_matrix = create_field_euler_angles_rotation_matrix(fieldmodule, rotation) rotated_coordinates = fieldmodule.createFieldMatrixMultiply(components_count, rotation_matrix, coordinates) transformed_coordinates = rotated_coordinates*scale + translation assert transformed_coordinates.isValid() return transformed_coordinates, rotation, scale, translation def create_field_volume_image(fieldmodule, image_filenames, name='volume_image'): """ Create an image field using the given fieldmodule. The image filename must exist and be a known image type. :param fieldmodule: The fieldmodule to create the field in. :param image_filenames: Image filename. :param name: Optional name of the image field, defaults to 'volume_image'. :return: The image field created. """ image_field = fieldmodule.createFieldImage() image_field.setName(name) image_field.setFilterMode(image_field.FILTER_MODE_LINEAR) # Create a stream information object that we can use to read the # image file from disk stream_information = image_field.createStreaminformationImage() # We are reading in a file from the local disk so our resource is a file. for image_filename in image_filenames: stream_information.createStreamresourceFile(image_filename) # Actually read in the image file into the image field. image_field.read(stream_information) return image_field def create_field_plane_visibility(fieldmodule, finite_element_field, plane_normal_field, point_on_plane_field): """ Create a visibility field that is based on the plane equation. """ d = fieldmodule.createFieldSubtract(finite_element_field, point_on_plane_field) p = fieldmodule.createFieldDotProduct(d, plane_normal_field) t = fieldmodule.createFieldConstant(0.1) v = fieldmodule.createFieldLessThan(p, t) return v def create_field_visibility_for_plane(fieldmodule: Fieldmodule, coordinate_field, plane): """ Create a visibility field for a plane. :param fieldmodule: Fieldmodule to own new field. 
:param coordinate_field: :param plane: :return: """ with ChangeManager(fieldmodule): normal_field = plane.getNormalField() rotation_point_field = plane.getRotationPointField() visibility_field = create_field_plane_visibility(fieldmodule, coordinate_field, normal_field, rotation_point_field) return visibility_field def create_field_iso_scalar_for_plane(fieldmodule: Fieldmodule, coordinate_field, plane): """ Create iso-scalar field for use with plane. :param fieldmodule: Fieldmodule to own new field. :param coordinate_field: :param plane: Plane description object. """ with ChangeManager(fieldmodule): normal_field = plane.getNormalField() rotation_point_field = plane.getRotationPointField() iso_scalar_field = _create_plane_equation_formulation(fieldmodule, coordinate_field, normal_field, rotation_point_field) return iso_scalar_field def get_group_list(fieldmodule): """ Get list of Zinc groups (FieldGroup) in fieldmodule. """ groups = [] field_iter = fieldmodule.createFielditerator() field = field_iter.next() while field.isValid(): group = field.castGroup() if group.isValid(): groups.append(group) field = field_iter.next() return groups def get_managed_field_names(fieldmodule): """ Get names of managed fields in fieldmodule. """ field_names = [] field_iter = fieldmodule.createFielditerator() field = field_iter.next() while field.isValid(): if field.isManaged(): field_names.append(field.getName()) field = field_iter.next() return field_names def field_exists(fieldmodule: Fieldmodule, name: str, field_type, components_count) -> bool: """ Tests to determine if the field with the given name exists in the given field module. :param fieldmodule: Zinc field module to search. :param name: Name of field to find. :param field_type: Type of field if derived type. Default: finiteelement. :param components_count: Number of components in the field. Default: 3. :return: True if the field is found in the module with the given name and number of components, false otherwise. """ field = fieldmodule.findFieldByName(name) if field.isValid(): if hasattr(field, 'cast' + field_type): field = getattr(field, 'cast' + field_type)() return field.isValid() and field.getNumberOfComponents() == components_count return field.getNumberOfComponents() == components_count return False def create_field_finite_element(fieldmodule: Fieldmodule, name: str, components_count: int, component_names=None, managed=False, type_coordinate=False) -> FieldFiniteElement: with ChangeManager(fieldmodule): field = fieldmodule.createFieldFiniteElement(components_count) field.setName(name) field.setManaged(managed) field.setTypeCoordinate(type_coordinate) if component_names is not None: for index, component_name in enumerate(component_names[:components_count]): field.setComponentName(index + 1, component_name) return field def create_field_finite_element_clone(source_field: Field, name: str, managed=False) -> FieldFiniteElement: """ Copy an existing Finite Element Field to a new field of supplied name. Note: does not handle time-varying parameters. New field is not managed by default. :param source_field: Zinc finite element field to copy. :param name: The name of the new field, asserts that no field of that name exists. :param managed: Managed state of field created here. :return: New identically defined field with supplied name. """ assert source_field.castFiniteElement().isValid(), \ "opencmiss.utils.zinc.field.createFieldFiniteElementClone. 
Not a Zinc finite element field" fieldmodule = source_field.getFieldmodule() field = fieldmodule.findFieldByName(name) assert not field.isValid(), "opencmiss.utils.zinc.field.createFieldFiniteElementClone. Target field name is in use" with ChangeManager(fieldmodule): # Zinc needs a function to do this efficiently; currently serialise to string, replace field name and reload! source_name = source_field.getName() region = fieldmodule.getRegion() sir = region.createStreaminformationRegion() srm = sir.createStreamresourceMemory() sir.setFieldNames([source_name]) region.write(sir) result, buffer = srm.getBuffer() # small risk of modifying other text here: source_bytes = bytes(") " + source_name + ",", "utf-8") target_bytes = bytes(") " + name + ",", "utf-8") buffer = buffer.replace(source_bytes, target_bytes) sir = region.createStreaminformationRegion() sir.createStreamresourceMemoryBuffer(buffer) result = region.read(sir) assert result == RESULT_OK # note currently must have called endChange before field can be found field = fieldmodule.findFieldByName(name).castFiniteElement() field.setManaged(managed) assert field.isValid() return field def find_or_create_field_finite_element(fieldmodule: Fieldmodule, name: str, components_count: int, component_names=None, managed=False, type_coordinate=False)\ -> FieldFiniteElement: """ Finds or creates a finite element field for the specified number of real components. :param fieldmodule: Zinc Fieldmodule to find or create field in. :param name: Name of field to find or create. :param components_count: Number of components / dimension of field, from 1 to 3. :param component_names: Optional list of component names. :param managed: Managed state of field if created here. :param type_coordinate: Default value of flag indicating field gives geometric coordinates. :return: Zinc FieldFiniteElement, invalid if error. """ assert (components_count > 0), "opencmiss.utils.zinc.field.find_or_create_field_finite_element." \ " Invalid components_count" assert (not component_names) or (len(component_names) >= components_count),\ "opencmiss.utils.zinc.field.find_or_create_field_finite_element. Invalid component_names" if field_exists(fieldmodule, name, 'FiniteElement', components_count): existing_field = fieldmodule.findFieldByName(name) return existing_field.castFiniteElement() return create_field_finite_element(fieldmodule, name, components_count, component_names, managed, type_coordinate) def create_field_coordinates(fieldmodule: Fieldmodule, name="coordinates", components_count=3, managed=False)\ -> FieldFiniteElement: """ Create RC coordinates finite element field of supplied name with number of components 1, 2, or 3 and the components named "x", "y" and "z" if used. New field is not managed by default. """ return create_field_finite_element(fieldmodule, name, components_count, component_names=("x", "y", "z"), managed=managed, type_coordinate=True) def find_or_create_field_coordinates(fieldmodule: Fieldmodule, name="coordinates", components_count=3, managed=True) \ -> FieldFiniteElement: """ Get or create RC coordinates finite element field of supplied name with number of components 1, 2, or 3 and the components named "x", "y" and "z" if used. New field is managed by default. 
""" assert 1 <= components_count <= 3 return find_or_create_field_finite_element(fieldmodule, name, components_count, component_names=("x", "y", "z"), managed=managed, type_coordinate=True) def create_field_fibres(fieldmodule: Fieldmodule, name="fibres", components_count=3, managed=False)\ -> FieldFiniteElement: """ Finds or creates a finite element fibre field. New field has component names: "fibre angle", "imbrication angle", "sheet angle". New field is not managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param name: Name of field to find or create. :param components_count: Number of components of field, from 1 to 3. :param managed: Managed state of field if created here. :return: Zinc FieldFiniteElement """ assert 1 <= components_count <= 3 with ChangeManager(fieldmodule): fibres = create_field_finite_element(fieldmodule, name, components_count, component_names=["fibre angle", "imbrication angle", "sheet angle"], managed=managed) fibres.setCoordinateSystemType(Field.COORDINATE_SYSTEM_TYPE_FIBRE) return fibres def find_or_create_field_fibres(fieldmodule: Fieldmodule, name="fibres", components_count=3, managed=True) \ -> FieldFiniteElement: """ Finds or creates a finite element fibre field. New field has component names: "fibre angle", "imbrication angle", "sheet angle". New field is managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param name: Name of field to find or create. :param components_count: Number of components of field, from 1 to 3. :param managed: Managed state of field if created here. :return: Zinc FieldFiniteElement """ assert 1 <= components_count <= 3 if field_exists(fieldmodule, name, 'FiniteElement', components_count): fibres = fieldmodule.findFieldByName(name).castFiniteElement() if fibres.getCoordinateSystemType() == Field.COORDINATE_SYSTEM_TYPE_FIBRE: return fibres return create_field_fibres(fieldmodule, name, components_count, managed=managed) def create_field_group(fieldmodule: Fieldmodule, name: str, managed=False) -> FieldGroup: """ Finds or creates a Group field of the supplied name. New field is not managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param name: Name of field to find or create. :param managed: Managed state of field if created here. :return: Zinc FieldGroup. """ with ChangeManager(fieldmodule): group = fieldmodule.createFieldGroup() group.setName(name) group.setManaged(managed) return group def find_or_create_field_group(fieldmodule: Fieldmodule, name: str, managed=True) -> FieldGroup: """ Finds or creates a Group field of the supplied name. New field is managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param name: Name of field to find or create. :param managed: Managed state of field if created here. :return: Zinc FieldGroup. """ field = fieldmodule.findFieldByName(name) if field.isValid(): group = field.castGroup() if group: return group return create_field_group(fieldmodule, name, managed=managed) def find_or_create_field_node_group(group: FieldGroup, nodeset: Nodeset) -> FieldNodeGroup: """ Gets or creates the node group field for the supplied nodeset in group. Field is managed by its parent group field. :param group: Zinc group field that manages child node group field. :param nodeset: A nodeset from group region to get or create subgroup of. :return: Zinc FieldNodeGroup. 
""" node_group = group.getFieldNodeGroup(nodeset) if not node_group.isValid(): node_group = group.createFieldNodeGroup(nodeset) return node_group def create_field_texture_coordinates(fieldmodule: Fieldmodule, name="texture coordinates", components_count=3, managed=False) -> FieldFiniteElement: """ Create texture coordinates finite element field of supplied name with number of components 1, 2, or 3 and the components named "u", "v" and "w" if used. New field is not managed by default. """ return create_field_finite_element(fieldmodule, name, components_count, component_names=("u", "v", "w"), managed=managed, type_coordinate=True) def find_or_create_field_texture_coordinates(fieldmodule: Fieldmodule, name="texture coordinates", components_count=3, managed=True) -> FieldFiniteElement: """ Create texture coordinates finite element field of supplied name with number of components 1, 2, or 3 and the components named "u", "v" and "w" if used. New field is managed by default. """ assert 1 <= components_count <= 3 return find_or_create_field_finite_element(fieldmodule, name, components_count, component_names=("u", "v", "w"), managed=managed, type_coordinate=True) def create_field_stored_mesh_location(fieldmodule: Fieldmodule, mesh: Mesh, name=None, managed=False)\ -> FieldStoredMeshLocation: """ Create a stored mesh location field for storing locations in the supplied mesh, used for storing data projections. New field is not managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param mesh: Mesh to store locations in, from same fieldmodule. :param name: Name of new field. If not defined, defaults to "location_" + mesh.getName(). :param managed: Managed state of field. :return: Zinc FieldStoredMeshLocation """ if not name: name = "location_" + mesh.getName() with ChangeManager(fieldmodule): mesh_location_field = fieldmodule.createFieldStoredMeshLocation(mesh) mesh_location_field.setName(name) mesh_location_field.setManaged(managed) return mesh_location_field def find_or_create_field_stored_mesh_location(fieldmodule: Fieldmodule, mesh: Mesh, name=None, managed=True)\ -> FieldStoredMeshLocation: """ Get or create a stored mesh location field for storing locations in the supplied mesh, used for storing data projections. Note can't currently verify existing field stores locations in the supplied mesh. New field is managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param mesh: Mesh to store locations in, from same fieldmodule. :param name: Name of new field. If not defined, defaults to "location_" + mesh.getName(). :param managed: Managed state of field if created here. """ if not name: name = "location_" + mesh.getName() field = fieldmodule.findFieldByName(name) # StoredMeshLocation field can only have 1 component; its value is an element + xi coordinates if field_exists(fieldmodule, name, 'StoredMeshLocation', 1): mesh_location_field = field.castStoredMeshLocation() return mesh_location_field return create_field_stored_mesh_location(fieldmodule, mesh, name=name, managed=managed) def create_field_stored_string(fieldmodule: Fieldmodule, name="name", managed=False) -> Field: """ Creates a stored string field for defining names on nodes or datapoints. New field is not managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param name: Name of field to find or create. :param managed: Managed state of field if created here. :return: Zinc Field. 
""" with ChangeManager(fieldmodule): stored_string_field = fieldmodule.createFieldStoredString() stored_string_field.setName(name) stored_string_field.setManaged(managed) return stored_string_field def find_or_create_field_stored_string(fieldmodule: Fieldmodule, name="name", managed=True) -> Field: """ Finds or creates a stored string field for defining names on nodes or datapoints. Note can't use Field.castStoredString API as not released. New field is managed by default. :param fieldmodule: Zinc fieldmodule to find or create field in. :param name: Name of field to find or create. :param managed: Managed state of field if created here. :return: Zinc Field. """ field = fieldmodule.findFieldByName(name) if field.isValid(): if field.getValueType() == Field.VALUE_TYPE_STRING: return field return create_field_stored_string(fieldmodule, name, managed=managed) def get_unique_field_name(fieldmodule: Fieldmodule, name: str) -> str: """ Return a unique field name in fieldmodule either equal to name or appending a number starting at 1 and increasing. :param fieldmodule: The fieldmodule to get a unique name in. :param name: The name to match or append a number to. """ field = fieldmodule.findFieldByName(name) if not field.isValid(): return name number = 1 while True: next_name = name + str(number) field = fieldmodule.findFieldByName(next_name) if not field.isValid(): return next_name number += 1 def orphan_field_by_name(fieldmodule: Fieldmodule, name: str): """ Find existing field with the name in fieldmodule. If it exists, uniquely rename it (prefix with ".destroy_" and append unique number) and unmanage it so destroyed when no longer in use. """ field = fieldmodule.findFieldByName(name) if field.isValid(): field.setName(get_unique_field_name(fieldmodule, ".destroy_" + name)) field.setManaged(False) # Create C++ style aliases for names of functions. 
createFieldsDisplacementGradients = create_fields_displacement_gradients createFieldsTransformations = create_fields_transformations createFieldEulerAnglesRotationMatrix = create_field_euler_angles_rotation_matrix createFieldFiniteElementClone = create_field_finite_element_clone createFieldMeshIntegral = create_field_mesh_integral createFieldVolumeImage = create_field_volume_image createFieldPlaneVisibility = create_field_plane_visibility createFieldVisibilityForPlane = create_field_visibility_for_plane createFieldIsoScalarForPlane = create_field_iso_scalar_for_plane createFieldImage = create_field_image createFieldCoordinates = create_field_coordinates createFieldFibres = create_field_fibres createFieldFiniteElement = create_field_finite_element createFieldGroup = create_field_group createFieldStoredMeshLocation = create_field_stored_mesh_location createFieldStoredString = create_field_stored_string createFieldTextureCoordinates = create_field_texture_coordinates getGroupList = get_group_list getManagedFieldNames = get_managed_field_names findOrCreateFieldCoordinates = find_or_create_field_coordinates findOrCreateFieldFiniteElement = find_or_create_field_finite_element findOrCreateFieldFibres = find_or_create_field_fibres findOrCreateFieldGroup = find_or_create_field_group findOrCreateFieldNodeGroup = find_or_create_field_node_group findOrCreateFieldStoredMeshLocation = find_or_create_field_stored_mesh_location findOrCreateFieldStoredString = find_or_create_field_stored_string findOrCreateFieldTextureCoordinates = find_or_create_field_texture_coordinates getUniqueFieldName = get_unique_field_name orphanFieldByName = orphan_field_by_name fieldIsManagedCoordinates = field_is_managed_coordinates fieldIsManagedGroup = field_is_managed_group assignFieldParameters = assign_field_parameters fieldExists = field_exists
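A minimal end-to-end sketch of the helpers above (assumes only an
OpenCMISS-Zinc Context; the rotation, scale and translation values are
illustrative):

from opencmiss.zinc.context import Context

context = Context("example")
fieldmodule = context.getDefaultRegion().getFieldmodule()
coordinates = find_or_create_field_coordinates(fieldmodule)  # managed "x","y","z"
transformed, rotation, scale, translation = create_fields_transformations(
    coordinates, rotation_angles=[0.0, 0.0, 0.5], scale_value=2.0,
    translation_offsets=[10.0, 0.0, 0.0])

The transformed field only evaluates to useful values once nodes or elements
are defined on the region; the rotation, scale and translation constants can
then be assigned directly or optimised.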
OpenCMISS-Bindings/opencmiss.utils
src/opencmiss/utils/zinc/field.py
Python
apache-2.0
31,320
[ "Gaussian" ]
5604c71f6112a6f6c67e19e511ec215d6b028ab8b35c3571d00109946dc3976c
#!/usr/bin/env python
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import RWMol
from rdkit.Chem import AllChem
import mol_io as io
import topomod
import math
import random

# xyz,atom,charge,spin = react.GenReaction(FileName,pro1Name,pro2Name)
def GenReaction(EdName,Pro1Name,Pro2Name):
    EdFile = EdName + ".mol"
    Pro1File = Pro1Name + ".mol"
    Pro2File = Pro2Name + ".mol"

    edmol = Chem.MolFromMolFile(EdFile,sanitize=True,removeHs=False)
    pro1mol = Chem.MolFromMolFile(Pro1File,sanitize=True,removeHs=False)
    pro2mol = Chem.MolFromMolFile(Pro2File,sanitize=True,removeHs=False)

    print(EdFile)
    print(Pro1File)
    print(Pro2File)

#    print(Chem.MolToMolBlock(edmol))
#    print(Chem.MolToMolBlock(pro1mol))
#    print(Chem.MolToMolBlock(pro2mol))

    xyzed,atomed,charge,spin = io.ReadMol(EdName)
    xyzpro1,atompro1,charge,spin = io.ReadMol(Pro1Name)
    xyzpro2,atompro2,charge,spin = io.ReadMol(Pro2Name)

    xyzed_new = []
    atomed_new = []

#    io.PrintXYZ('Educt',atomed,xyzed)

# Align first product to molecule
    bestpair = []
    bestrmsd = 100
    ifit = 0
    while ifit < 10000:  # brute-force search: score random atom-pair lists and keep the best
        ifit += 1
        pairlist1 = range(len(atompro1))
        atomlist = range(len(atompro1))
        random.shuffle(atomlist)
        flags = [False]*len(atomed)
        weightlist = [1]*len(atompro1)
#        print(atomlist)
        for j in atomlist:
            for i in range(len(atomed)):
                if(atomed[i] != atompro1[j]):
                    continue
                elif not flags[i]:
                    pairlist1[j] = [j,i]
                    flags[i] = True
                    break
#        print(pairlist1)
        if len(pairlist1) == 1:  # if the fragment is a single atom RDKit's AlignMol fails. Simply move atom to corresponding location
            xyzpro1[0] = xyzed[pairlist1[0][1]]
            xyzed_new.append(xyzed[pairlist1[0][1]])
            atomed_new.append(atomed[pairlist1[0][1]])
            ifit = 1000000
        else:
            for i in range(len(pairlist1)):
                weightlist[i] = pro1mol.GetAtomWithIdx(i).GetAtomicNum()**2
            rmsd = AllChem.AlignMol(pro1mol,edmol,atomMap=pairlist1,weights=weightlist)
#            print(rmsd)
            if rmsd<bestrmsd:
                bestrmsd = rmsd
                bestpair = pairlist1

    if len(pairlist1) > 1:  # realign with best pairlist. also, write to new educt geo
        pairlist1 = bestpair
        for i in range(len(pairlist1)):
            weightlist[i] = pro1mol.GetAtomWithIdx(i).GetAtomicNum()**2
            xyzed_new.append(xyzed[pairlist1[i][1]])
            atomed_new.append(atomed[pairlist1[i][1]])
        rmsd = AllChem.AlignMol(pro1mol,edmol,atomMap=pairlist1,weights=weightlist)
        print(rmsd)

    print(Chem.MolToMolBlock(pro1mol),file=open('pro1mol.mol','w+'))
    xyzpro1,atompro1,charge,spin = io.ReadMol('pro1mol')
#    io.PrintXYZ('Product1',atompro1,xyzpro1)

#    print(Chem.MolToMolBlock(edmol),file=open('edmol.mol','w+'))
#    print(Chem.MolToMolBlock(pro1mol),file=open('pro1mol.mol','w+'))

# Align second product to remaining framework. Same as above
    bestpair = []
    bestrmsd = 100
    ifit = 0
    while ifit < 10000:
        ifit += 1
        pairlist2 = range(len(atompro2))
        atomlist = range(len(atompro2))
        random.shuffle(atomlist)
        flags = [False]*len(atomed)
        weightlist = [1]*len(atompro2)
#        print(atomlist)
        for j in atomlist:
            for i in range(len(atomed)):
                if(atomed[i] != atompro2[j]):
                    continue
                elif any(x[1] == i for x in pairlist1):  # skip atoms already used in prev step.
                    continue
                elif not flags[i]:
                    pairlist2[j] = [j,i]
                    flags[i] = True
                    break
#        print(pairlist2)
        if len(pairlist2) == 1:
            xyzpro2[0] = xyzed[pairlist2[0][1]]
            xyzed_new.append(xyzed[pairlist2[0][1]])
            atomed_new.append(atomed[pairlist2[0][1]])
            ifit = 1000000
        else:
            for i in range(len(pairlist2)):
                weightlist[i] = pro2mol.GetAtomWithIdx(i).GetAtomicNum()**2
            rmsd = AllChem.AlignMol(pro2mol,edmol,atomMap=pairlist2,weights=weightlist)
#            print(rmsd)
            if rmsd<bestrmsd:
                bestrmsd = rmsd
                bestpair = pairlist2

    if len(pairlist2) > 1:
        pairlist2 = bestpair
        for i in range(len(pairlist2)):
            weightlist[i] = pro2mol.GetAtomWithIdx(i).GetAtomicNum()**2
            xyzed_new.append(xyzed[pairlist2[i][1]])
            atomed_new.append(atomed[pairlist2[i][1]])
        rmsd = AllChem.AlignMol(pro2mol,edmol,atomMap=pairlist2,weights=weightlist)
        print(rmsd)

    print(Chem.MolToMolBlock(pro2mol),file=open('pro2mol.mol','w+'))
    xyzpro2,atompro2,charge,spin = io.ReadMol('pro2mol')
#    io.PrintXYZ('Product2',atompro2,xyzpro2)

# shift along vector connecting fragments' centers of mass
    xyzpro1s,xyzpro2s = CoM_shift(atompro1,xyzpro1,atompro2,xyzpro2,4.0)

# merge aligned and shifted product geometries
    atom_diss,xyz_diss,frags = io.MergeXYZ(atompro1,xyzpro1,atompro2,xyzpro2)
    atom_shift,xyz_shift,frags = io.MergeXYZ(atompro1,xyzpro1s,atompro2,xyzpro2s)

    io.PrintXYZ('Reordered Educt',atomed_new,xyzed_new)
#    io.PrintXYZ('Merged Products',atom_diss,xyz_diss)
    io.PrintXYZ('Shifted Products',atom_shift,xyz_shift)
    io.PrintAimsGeo('Reordered Educt',atomed_new,xyzed_new)
    io.PrintAimsGeo('Shifted Products',atom_shift,xyz_shift)

    return xyzed,atomed,charge,spin

def CoM_shift(atom1,xyz1,atom2,xyz2,shift):
# calculate center of mass 1
    sumM = 0.0
    CoM1 = [0.0, 0.0, 0.0]
    for i in range(len(xyz1)):
        mi = Mass(atom1[i])
        sumM += mi
        CoM1[0] += xyz1[i][0]*mi
        CoM1[1] += xyz1[i][1]*mi
        CoM1[2] += xyz1[i][2]*mi
    CoM1[0] /= sumM
    CoM1[1] /= sumM
    CoM1[2] /= sumM
# calculate center of mass 2
    sumM = 0.0
    CoM2 = [0.0, 0.0, 0.0]
    for i in range(len(xyz2)):
        mi = Mass(atom2[i])
        sumM += mi
        CoM2[0] += xyz2[i][0]*mi
        CoM2[1] += xyz2[i][1]*mi
        CoM2[2] += xyz2[i][2]*mi
    CoM2[0] /= sumM
    CoM2[1] /= sumM
    CoM2[2] /= sumM
# normalized vector pointing from CoM2 to CoM1; fragment 2 is shifted away along it
    vect = [CoM1[0]-CoM2[0],CoM1[1]-CoM2[1],CoM1[2]-CoM2[2]]
    norm = math.sqrt(vect[0]**2+vect[1]**2+vect[2]**2)
    vect[0] /= norm
    vect[1] /= norm
    vect[2] /= norm
    norm = math.sqrt(vect[0]**2+vect[1]**2+vect[2]**2)
    print(norm)
# calculate shifted coordinates: fragment 1 stays put, fragment 2 moves away
    xyz1_new = []
    for coords in xyz1:
#        xyz1_new.append([coords[0] + vect[0]*shift,coords[1] + vect[1]*shift,coords[2] + vect[2]*shift])
        xyz1_new.append([coords[0],coords[1],coords[2]])
    xyz2_new = []
    for coords in xyz2:
        xyz2_new.append([coords[0] - vect[0]*shift,coords[1] - vect[1]*shift,coords[2] - vect[2]*shift])
    return xyz1_new,xyz2_new

def Mass(El):
    if(El=='H'):    return 1.01
    elif(El=='He'): return 4.0
    elif(El=='Li'): return 6.94
    elif(El=='Be'): return 9.01
    elif(El=='B'):  return 10.81
    elif(El=='C'):  return 12.01
    elif(El=='N'):  return 14.01
    elif(El=='O'):  return 16.00
    elif(El=='F'):  return 19.00
    elif(El=='Ne'): return 20.18
    else:
        raise ValueError("mass unknown for element " + El)
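A usage sketch matching the comment at the top of the module (assumes
educt.mol, pro1.mol and pro2.mol are present and readable both by RDKit and by
mol_io.ReadMol; the file stems are illustrative):

import react

# Aligns both products onto the educt, reorders the educt atoms to match,
# shifts fragment 2 away by 4 units along the line between the centers of
# mass, and writes the geometries via the io.PrintXYZ/io.PrintAimsGeo helpers.
xyz, atom, charge, spin = react.GenReaction('educt', 'pro1', 'pro2')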
jmargraf/agrippa
inputgen/react.py
Python
gpl-3.0
7,695
[ "RDKit" ]
a02faa3dde50ea6d087d5091189b78783090cec2a6d6bb43cdf1cd1fc446eb27
""" Module that contains client access to the WMSAdministrator handler. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from DIRAC.Core.Base.Client import Client, createClient @createClient('WorkloadManagement/WMSAdministrator') class WMSAdministratorClient(Client): """JobManagerClient sets url for the WMSAdministratorHandler. """ def __init__(self, url=None, **kwargs): """ Sets URL for WMSAdministrator handler :param self: self reference :param url: url of the WMSAdministratorHandler :param kwargs: forwarded to the Base Client class """ super(WMSAdministratorClient, self).__init__(**kwargs) if not url: self.serverURL = 'WorkloadManagement/WMSAdministrator' else: self.serverURL = url
yujikato/DIRAC
src/DIRAC/WorkloadManagementSystem/Client/WMSAdministratorClient.py
Python
gpl-3.0
820
[ "DIRAC" ]
d2a5e849edbd6b2999563d7d77fd720c151aff75a60928ef30a18eb69868f597
"""
This package implements various Nwchem Jobs and Error Handlers.
"""

from __future__ import unicode_literals

__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__status__ = "Beta"
__date__ = "6/17/13"
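A hypothetical usage sketch of the jobs and handlers this package provides
(class and argument names should be checked against the sibling jobs.py and
handlers.py modules for your custodian version; file names are illustrative):

from custodian import Custodian
from custodian.nwchem.jobs import NwchemJob
from custodian.nwchem.handlers import NwchemErrorHandler

job = NwchemJob(nwchem_cmd=["nwchem"], input_file="mol.nw",
                output_file="mol.nwout")
c = Custodian([NwchemErrorHandler(output_filename="mol.nwout")], [job],
              max_errors=5)
c.run()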
specter119/custodian
custodian/nwchem/__init__.py
Python
mit
324
[ "NWChem" ]
3c9191a8f19337c35d42b9b4093d40e19d8f56e478da258c61cce6aeb6df659d
#! /usr/bin/env python3 # -*- coding: utf-8 -*- """ Mathematical constants and functions extending the functionality available from OpenCL directly. @author: Sebastian M. Gaebel @email: sebastian.gaebel@ligo.org """ # TODO: Other potentially useful stuff: # * integration routine # * interpolation in 1D, maybe more # * KDE # * median # * percentile # * sorting # %% Interpolation interpolation_template = """ """ # %% Math Macros # TODO: Useful constants not included by default in OpenCL math_constants = """ #define M_1_SQRTPI 0.56418958354775628 #define M_1_SQRT2PI 0.3989422804014327 #define M_LOG_1_SQRTPI -0.57236494292470008 #define M_LOG_1_SQRT2PI -0.91893853320467267 #define M_SQRT_2_PI 0.79788456080286541 #define M_LOG_SQRT_2_PI -0.22579135264472738 #ifndef M_1_SQRT2 #define M_1_SQRT2 0.70710678118654757 #endif """ # %% Math templates sum_template = """ cdouble sum(__global const cdouble *, const size_t); cdouble sum(__global const cdouble * iterable, const size_t length) { cdouble accumulator = 0; for(size_t i = 0; i < length; i++) { accumulator += iterable[i]; } return accumulator; } """ product_template = """ cdouble product(__global const cdouble *, const size_t); cdouble product(__global const cdouble * iterable, const size_t length) { cdouble accumulator = 1; for(size_t i = 0; i < length; i++) { accumulator *= iterable[i]; } return accumulator; } """ logsumexp_template = """ cdouble logsumexp(__global const cdouble *, const size_t); cdouble logsumexp(__global const cdouble * log_values, size_t length) { cdouble max_value = -INFINITY; for(size_t i = 0; i < length; i++) { max_value = fmax(max_value, log_values[i]); } cdouble accumulator = 0; for(size_t i = 0; i < length; i++) { accumulator += exp(log_values[i] - max_value); } return log(accumulator) + max_value; } """ logaddexp_template = """ cdouble logaddexp(const cdouble, const cdouble); cdouble logaddexp(const cdouble x, const cdouble y) { return fmax(x, y) + log1p(exp(-fabs(x - y))); } """ mean_template = """ cdouble mean(__global const cdouble *, const size_t); cdouble mean(__global const cdouble * iterable, const size_t length) { cdouble accumulator = 0; for(size_t i = 0; i < length; i++) { accumulator += iterable[i]; } return accumulator / length; } """ stddev_template = """ cdouble stddev(__global const cdouble *, const size_t); cdouble stddev(__global const cdouble * iterable, const size_t length) { const cdouble mean_value = mean(iterable, length); cdouble accumulator = 0; for(size_t i = 0; i < length; i++) { accumulator += pown(iterable[i] - mean_value, 2); } return sqrt(accumulator / length); } """ min_template = """ cdouble iter_min(__global const cdouble *, const size_t); cdouble iter_min(__global const cdouble * iterable, const size_t length) { cdouble current_min = iterable[0]; for(size_t i = 1; i < length; i++) { current_min = fmin(current_min, iterable[i]); } return current_min; } """ max_template = """ cdouble iter_max(__global const cdouble *, const size_t); cdouble iter_max(__global const cdouble * iterable, const size_t length) { cdouble current_max = iterable[0]; for(size_t i = 1; i < length; i++) { current_max = fmax(current_max, iterable[i]); } return current_max; } """ # %% Distribution templates # TODO: Add normed and/or log versions? # TODO: Other distributions? chi-squared, lognormal # TODO: Higher dimensions (esp. gaussian)? 
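# For reference, the truncated normal implemented below is the standard
#     p(x) = phi((x - mean)/stddev) / (stddev * (Phi(b) - Phi(a))),
#     a = (low - mean)/stddev, b = (high - mean)/stddev,
# and with Phi(z) = (1 + erf(z/sqrt(2)))/2 the normalisation reduces to the
# sqrt(2/pi) / (erf_H - erf_L) prefactor used in trunc_gaussian.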
gaussian_pdf_templates = """ cdouble gaussian(const cdouble, const cdouble, const cdouble); cdouble log_gaussian(const cdouble, const cdouble, const cdouble); cdouble gaussian(const cdouble value, const cdouble mean, const cdouble stddev) { return M_1_SQRT2PI * exp(-0.5 * pown((value - mean) / stddev, 2)) / stddev; } cdouble log_gaussian(const cdouble value, const cdouble mean, const cdouble stddev) { return M_LOG_1_SQRT2PI - 0.5 * pown((value - mean) / stddev, 2) - log(stddev); } """ trunc_gaussian_pdf_templates = """ cdouble trunc_gaussian(const cdouble, const cdouble, const cdouble, const cdouble, const cdouble); cdouble log_trunc_gaussian(const cdouble, const cdouble, const cdouble, const cdouble, const cdouble); cdouble trunc_gaussian(const cdouble value, const cdouble mean, const cdouble stddev, const cdouble low, const cdouble high) { if(value < low || value > high) { return 0.; } const cdouble inv_stddev = 1.0 / stddev; const cdouble erf_L = erf((low - mean) * M_1_SQRT2 * inv_stddev); const cdouble erf_H = erf((high - mean) * M_1_SQRT2 * inv_stddev); return M_SQRT_2_PI * inv_stddev * exp(-0.5 * pown((value - mean) * inv_stddev, 2)) / (erf_H - erf_L); } cdouble log_trunc_gaussian(const cdouble value, const cdouble mean, const cdouble stddev, const cdouble low, const cdouble high) { if(value < low || value > high) { return -INFINITY; } const cdouble inv_stddev = 1.0 / stddev; const cdouble log_sqrt_2_pi = -0.22579135264472741; return log_sqrt_2_pi - log(stddev) - 0.5 * pown((value - mean) * inv_stddev, 2) - log(erf((high - mean) * M_1_SQRT2 * inv_stddev) - erf((low - mean) * M_1_SQRT2 * inv_stddev)); } """ power_law_templates = """ cdouble power_law(const cdouble, const cdouble, const cdouble, const cdouble); cdouble power_law_falling(const cdouble, const cdouble, const cdouble); cdouble log_power_law(const cdouble, const cdouble, const cdouble, const cdouble); cdouble log_power_law_falling(const cdouble, const cdouble, const cdouble); cdouble power_law(const cdouble value, const cdouble slope, const cdouble low, const cdouble high) { if((value < low) || (value > high)) { return 0.; } else if(slope == -1.) { return 1. / (value * (log(high) - log(low))); } return pow(value, slope) * (1.+slope) / (pow(high, 1.+slope) - pow(low, 1.+slope)); } cdouble power_law_falling(const cdouble value, const cdouble slope, const cdouble cutoff) { if(value < cutoff) { return 0.; } return pow(value, slope) * (-1.-slope) * pow(cutoff, -1.-slope); } cdouble log_power_law(const cdouble value, const cdouble slope, const cdouble low, const cdouble high) { if((value < low) || (value > high)) { return -INFINITY; } else if(slope == -1.) { return -log(value) - log(log(high) - log(low)); } else if(slope < -1.) { return log(-1.-slope) + slope * log(value) - log(pow(low, 1.+slope) - pow(high, 1.+slope)); } else { return log(1.+slope) + slope * log(value) - log(pow(high, 1.+slope) - pow(low, 1.+slope)); } } cdouble log_power_law_falling(const cdouble value, const cdouble slope, const cdouble cutoff) { if(value < cutoff) { return -INFINITY; } return log(-1.-slope) + slope * log(value) - (1.+slope) * log(cutoff); } """ def basic_code(): return '\n'.join([math_constants, sum_template, product_template, logsumexp_template, logaddexp_template, mean_template, stddev_template, min_template, max_template, gaussian_pdf_templates, trunc_gaussian_pdf_templates, power_law_templates])
sgaebel/GAPS
gaps/auxiliary_sources.py
Python
mit
7,372
[ "Gaussian" ]
1eada400fccfc1e5bc07fb39d152e9a9912c0e4ce12feec6175fbfea8c356943
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base classes for probability distributions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import contextlib import types import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import kullback_leibler from tensorflow.python.ops.distributions import util from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export __all__ = [ "ReparameterizationType", "FULLY_REPARAMETERIZED", "NOT_REPARAMETERIZED", "Distribution", ] _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [ "batch_shape", "batch_shape_tensor", "cdf", "covariance", "cross_entropy", "entropy", "event_shape", "event_shape_tensor", "kl_divergence", "log_cdf", "log_prob", "log_survival_function", "mean", "mode", "prob", "sample", "stddev", "survival_function", "variance", ] @six.add_metaclass(abc.ABCMeta) class _BaseDistribution(object): """Abstract base class needed for resolving subclass hierarchy.""" pass def _copy_fn(fn): """Create a deep copy of fn. Args: fn: a callable Returns: A `FunctionType`: a deep copy of fn. Raises: TypeError: if `fn` is not a callable. """ if not callable(fn): raise TypeError("fn is not callable: %s" % fn) # The blessed way to copy a function. copy.deepcopy fails to create a # non-reference copy. Since: # types.FunctionType == type(lambda: None), # and the docstring for the function type states: # # function(code, globals[, name[, argdefs[, closure]]]) # # Create a function object from a code object and a dictionary. # ... # # Here we can use this to create a new function with the old function's # code, globals, closure, etc. return types.FunctionType( code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__) def _update_docstring(old_str, append_str): """Update old_str by inserting append_str just before the "Args:" section.""" old_str = old_str or "" old_str_lines = old_str.split("\n") # Step 0: Prepend spaces to all lines of append_str. This is # necessary for correct markdown generation. 
append_str = "\n".join(" %s" % line for line in append_str.split("\n")) # Step 1: Find mention of "Args": has_args_ix = [ ix for ix, line in enumerate(old_str_lines) if line.strip().lower() == "args:"] if has_args_ix: final_args_ix = has_args_ix[-1] return ("\n".join(old_str_lines[:final_args_ix]) + "\n\n" + append_str + "\n\n" + "\n".join(old_str_lines[final_args_ix:])) else: return old_str + "\n\n" + append_str class _DistributionMeta(abc.ABCMeta): def __new__(mcs, classname, baseclasses, attrs): """Control the creation of subclasses of the Distribution class. The main purpose of this method is to properly propagate docstrings from private Distribution methods, like `_log_prob`, into their public wrappers as inherited by the Distribution base class (e.g. `log_prob`). Args: classname: The name of the subclass being created. baseclasses: A tuple of parent classes. attrs: A dict mapping new attributes to their values. Returns: The class object. Raises: TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or the new class is derived via multiple inheritance and the first parent class is not a subclass of `BaseDistribution`. AttributeError: If `Distribution` does not implement e.g. `log_prob`. ValueError: If a `Distribution` public method lacks a docstring. """ if not baseclasses: # Nothing to be done for Distribution raise TypeError("Expected non-empty baseclass. Does Distribution " "not subclass _BaseDistribution?") which_base = [ base for base in baseclasses if base == _BaseDistribution or issubclass(base, Distribution)] base = which_base[0] if base == _BaseDistribution: # Nothing to be done for Distribution return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs) if not issubclass(base, Distribution): raise TypeError("First parent class declared for %s must be " "Distribution, but saw '%s'" % (classname, base.__name__)) for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS: special_attr = "_%s" % attr class_attr_value = attrs.get(attr, None) if attr in attrs: # The method is being overridden, do not update its docstring continue base_attr_value = getattr(base, attr, None) if not base_attr_value: raise AttributeError( "Internal error: expected base class '%s' to implement method '%s'" % (base.__name__, attr)) class_special_attr_value = attrs.get(special_attr, None) if class_special_attr_value is None: # No _special method available, no need to update the docstring. continue class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value) if not class_special_attr_docstring: # No docstring to append. continue class_attr_value = _copy_fn(base_attr_value) class_attr_docstring = tf_inspect.getdoc(base_attr_value) if class_attr_docstring is None: raise ValueError( "Expected base class fn to contain a docstring: %s.%s" % (base.__name__, attr)) class_attr_value.__doc__ = _update_docstring( class_attr_value.__doc__, ("Additional documentation from `%s`:\n\n%s" % (classname, class_special_attr_docstring))) attrs[attr] = class_attr_value return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs) @tf_export("distributions.ReparameterizationType") class ReparameterizationType(object): """Instances of this class represent how sampling is reparameterized. Two static instances exist in the distributions library, signifying one of two possible properties for samples from a distribution: `FULLY_REPARAMETERIZED`: Samples from the distribution are fully reparameterized, and straight-through gradients are supported. 
  `NOT_REPARAMETERIZED`: Samples from the distribution are not fully
    reparameterized, and straight-through gradients are either partially
    unsupported or are not supported at all. In this case, for purposes of
    e.g. RL or variational inference, it is generally safest to wrap the
    sample results in a `stop_gradients` call and use policy
    gradients / surrogate loss instead.
  """

  def __init__(self, rep_type):
    self._rep_type = rep_type

  def __repr__(self):
    return "<Reparameterization Type: %s>" % self._rep_type

  def __eq__(self, other):
    """Determine if this `ReparameterizationType` is equal to another.

    Since ReparameterizationType instances are constant static global
    instances, equality checks if two instances' id() values are equal.

    Args:
      other: Object to compare against.

    Returns:
      `self is other`.
    """
    return self is other


# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export("distributions.FULLY_REPARAMETERIZED").export_constant(
    __name__, "FULLY_REPARAMETERIZED")

# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
tf_export("distributions.NOT_REPARAMETERIZED").export_constant(
    __name__, "NOT_REPARAMETERIZED")


@six.add_metaclass(_DistributionMeta)
@tf_export("distributions.Distribution")
class Distribution(_BaseDistribution):
  """A generic probability distribution base class.

  `Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).

  #### Subclassing

  Subclasses are expected to implement a leading-underscore version of the
  same-named function. The argument signature should be identical except for
  the omission of `name="..."`. For example, to enable `log_prob(value,
  name="log_prob")` a subclass should implement `_log_prob(value)`.

  Subclasses can append to public-level docstrings by providing
  docstrings for their method specializations. For example:

  ```python
  @util.AppendDocstring("Some other details.")
  def _log_prob(self, value):
    ...
  ```

  would add the string "Some other details." to the `log_prob` function
  docstring. This is implemented as a simple decorator to avoid python
  linter complaining about missing Args/Returns/Raises sections in the
  partial docstrings.

  #### Broadcasting, batching, and shapes

  All distributions support batches of independent distributions of that type.
  The batch shape is determined by broadcasting together the parameters.

  The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
  `log_prob` reflect this broadcasting, as does the return value of `sample`
  and `sample_n`.

  `sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape`
  is the shape of the `Tensor` returned from `sample_n`, `n` is the number of
  samples, `batch_shape` defines how many independent distributions there are,
  and `event_shape` defines the shape of samples from each of those independent
  distributions. Samples are independent along the `batch_shape` dimensions,
  but not necessarily so along the `event_shape` dimensions (depending on the
  particulars of the underlying distribution).
Using the `Uniform` distribution as an example: ```python minval = 3.0 maxval = [[4.0, 6.0], [10.0, 12.0]] # Broadcasting: # This instance represents 4 Uniform distributions. Each has a lower bound at # 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape. u = Uniform(minval, maxval) # `event_shape` is `TensorShape([])`. event_shape = u.event_shape # `event_shape_t` is a `Tensor` which will evaluate to []. event_shape_t = u.event_shape_tensor() # Sampling returns a sample per distribution. `samples` has shape # [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5, # batch_shape=[2, 2], and event_shape=[]. samples = u.sample_n(5) # The broadcasting holds across methods. Here we use `cdf` as an example. The # same holds for `log_cdf` and the likelihood functions. # `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the # shape of the `Uniform` instance. cum_prob_broadcast = u.cdf(4.0) # `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting # occurred. cum_prob_per_dist = u.cdf([[4.0, 5.0], [6.0, 7.0]]) # INVALID as the `value` argument is not broadcastable to the distribution's # shape. cum_prob_invalid = u.cdf([4.0, 5.0, 6.0]) ``` #### Shapes There are three important concepts associated with TensorFlow Distributions shapes: - Event shape describes the shape of a single draw from the distribution; it may be dependent across dimensions. For scalar distributions, the event shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is `[5]`. - Batch shape describes independent, not identically distributed draws, aka a "collection" or "bunch" of distributions. - Sample shape describes independent, identically distributed draws of batches from the distribution family. The event shape and the batch shape are properties of a Distribution object, whereas the sample shape is associated with a specific call to `sample` or `log_prob`. For detailed usage examples of TensorFlow Distributions shapes, see [this tutorial]( https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb) #### Parameter values leading to undefined statistics or distributions. Some distributions do not have well-defined statistics for all initialization parameter values. For example, the beta distribution is parameterized by positive real numbers `concentration1` and `concentration0`, and does not have well-defined mode if `concentration1 < 1` or `concentration0 < 1`. The user is given the option of raising an exception or returning `NaN`. ```python a = tf.exp(tf.matmul(logits, weights_a)) b = tf.exp(tf.matmul(logits, weights_b)) # Will raise exception if ANY batch member has a < 1 or b < 1. dist = distributions.beta(a, b, allow_nan_stats=False) mode = dist.mode().eval() # Will return NaN for batch members with either a < 1 or b < 1. dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior mode = dist.mode().eval() ``` In all cases, an exception is raised if *invalid* parameters are passed, e.g. ```python # Will raise an exception if any Op is run. negative_a = -1.0 * a # beta distribution by definition has a > 0. dist = distributions.beta(negative_a, b, allow_nan_stats=True) dist.mean().eval() ``` """ def __init__(self, dtype, reparameterization_type, validate_args, allow_nan_stats, parameters=None, graph_parents=None, name=None): """Constructs the `Distribution`. 
**This is a private method for subclass use.** Args: dtype: The type of the event samples. `None` implies no type-enforcement. reparameterization_type: Instance of `ReparameterizationType`. If `distributions.FULLY_REPARAMETERIZED`, this `Distribution` can be reparameterized in terms of some standard distribution with a function whose Jacobian is constant for the support of the standard distribution. If `distributions.NOT_REPARAMETERIZED`, then no such reparameterization is available. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. parameters: Python `dict` of parameters used to instantiate this `Distribution`. graph_parents: Python `list` of graph prerequisites of this `Distribution`. name: Python `str` name prefixed to Ops created by this class. Default: subclass name. Raises: ValueError: if any member of graph_parents is `None` or not a `Tensor`. """ graph_parents = [] if graph_parents is None else graph_parents for i, t in enumerate(graph_parents): if t is None or not tensor_util.is_tensor(t): raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t)) if not name or name[-1] != "/": # `name` is not a name scope non_unique_name = name or type(self).__name__ with ops.name_scope(non_unique_name) as name: pass self._dtype = dtype self._reparameterization_type = reparameterization_type self._allow_nan_stats = allow_nan_stats self._validate_args = validate_args self._parameters = parameters or {} self._graph_parents = graph_parents self._name = name @classmethod def param_shapes(cls, sample_shape, name="DistributionParamShapes"): """Shapes of parameters given the desired shape of a call to `sample()`. This is a class method that describes what key/value arguments are required to instantiate the given `Distribution` so that a particular shape is returned for that instance's call to `sample()`. Subclasses should override class method `_param_shapes`. Args: sample_shape: `Tensor` or python list/tuple. Desired shape of a call to `sample()`. name: name to prepend ops with. Returns: `dict` of parameter name to `Tensor` shapes. """ with ops.name_scope(name, values=[sample_shape]): return cls._param_shapes(sample_shape) @classmethod def param_static_shapes(cls, sample_shape): """param_shapes with static (i.e. `TensorShape`) shapes. This is a class method that describes what key/value arguments are required to instantiate the given `Distribution` so that a particular shape is returned for that instance's call to `sample()`. Assumes that the sample's shape is known statically. Subclasses should override class method `_param_shapes` to return constant-valued tensors when constant values are fed. Args: sample_shape: `TensorShape` or python list/tuple. Desired shape of a call to `sample()`. Returns: `dict` of parameter name to `TensorShape`. Raises: ValueError: if `sample_shape` is a `TensorShape` and is not fully defined. 
""" if isinstance(sample_shape, tensor_shape.TensorShape): if not sample_shape.is_fully_defined(): raise ValueError("TensorShape sample_shape must be fully defined") sample_shape = sample_shape.as_list() params = cls.param_shapes(sample_shape) static_params = {} for name, shape in params.items(): static_shape = tensor_util.constant_value(shape) if static_shape is None: raise ValueError( "sample_shape must be a fully-defined TensorShape or list/tuple") static_params[name] = tensor_shape.TensorShape(static_shape) return static_params @staticmethod def _param_shapes(sample_shape): raise NotImplementedError("_param_shapes not implemented") @property def name(self): """Name prepended to all ops created by this `Distribution`.""" return self._name @property def dtype(self): """The `DType` of `Tensor`s handled by this `Distribution`.""" return self._dtype @property def parameters(self): """Dictionary of parameters used to instantiate this `Distribution`.""" # Remove "self", "__class__", or other special variables. These can appear # if the subclass used: # `parameters = dict(locals())`. return dict((k, v) for k, v in self._parameters.items() if not k.startswith("__") and k != "self") @property def reparameterization_type(self): """Describes how samples from the distribution are reparameterized. Currently this is one of the static instances `distributions.FULLY_REPARAMETERIZED` or `distributions.NOT_REPARAMETERIZED`. Returns: An instance of `ReparameterizationType`. """ return self._reparameterization_type @property def allow_nan_stats(self): """Python `bool` describing behavior when a stat is undefined. Stats return +/- infinity when it makes sense. E.g., the variance of a Cauchy distribution is infinity. However, sometimes the statistic is undefined, e.g., if a distribution's pdf does not achieve a maximum within the support of the distribution, the mode is undefined. If the mean is undefined, then by definition the variance is undefined. E.g. the mean for Student's T for df = 1 is undefined (no clear way to say it is either + or - infinity), so the variance = E[(X - mean)**2] is also undefined. Returns: allow_nan_stats: Python `bool`. """ return self._allow_nan_stats @property def validate_args(self): """Python `bool` indicating possibly expensive checks are enabled.""" return self._validate_args def copy(self, **override_parameters_kwargs): """Creates a deep copy of the distribution. Note: the copy distribution may continue to depend on the original initialization arguments. Args: **override_parameters_kwargs: String/value dictionary of initialization arguments to override with new values. Returns: distribution: A new instance of `type(self)` initialized from the union of self.parameters and override_parameters_kwargs, i.e., `dict(self.parameters, **override_parameters_kwargs)`. """ parameters = dict(self.parameters, **override_parameters_kwargs) return type(self)(**parameters) def _batch_shape_tensor(self): raise NotImplementedError("batch_shape_tensor is not implemented") def batch_shape_tensor(self, name="batch_shape_tensor"): """Shape of a single sample from a single event index as a 1-D `Tensor`. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Args: name: name to give to the op Returns: batch_shape: `Tensor`. 
""" with self._name_scope(name): if self.batch_shape.is_fully_defined(): return ops.convert_to_tensor(self.batch_shape.as_list(), dtype=dtypes.int32, name="batch_shape") return self._batch_shape_tensor() def _batch_shape(self): return tensor_shape.TensorShape(None) @property def batch_shape(self): """Shape of a single sample from a single event index as a `TensorShape`. May be partially defined or unknown. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Returns: batch_shape: `TensorShape`, possibly unknown. """ return tensor_shape.as_shape(self._batch_shape()) def _event_shape_tensor(self): raise NotImplementedError("event_shape_tensor is not implemented") def event_shape_tensor(self, name="event_shape_tensor"): """Shape of a single sample from a single batch as a 1-D int32 `Tensor`. Args: name: name to give to the op Returns: event_shape: `Tensor`. """ with self._name_scope(name): if self.event_shape.is_fully_defined(): return ops.convert_to_tensor(self.event_shape.as_list(), dtype=dtypes.int32, name="event_shape") return self._event_shape_tensor() def _event_shape(self): return tensor_shape.TensorShape(None) @property def event_shape(self): """Shape of a single sample from a single batch as a `TensorShape`. May be partially defined or unknown. Returns: event_shape: `TensorShape`, possibly unknown. """ return tensor_shape.as_shape(self._event_shape()) def is_scalar_event(self, name="is_scalar_event"): """Indicates that `event_shape == []`. Args: name: Python `str` prepended to names of ops created by this function. Returns: is_scalar_event: `bool` scalar `Tensor`. """ with self._name_scope(name): return ops.convert_to_tensor( self._is_scalar_helper(self.event_shape, self.event_shape_tensor), name="is_scalar_event") def is_scalar_batch(self, name="is_scalar_batch"): """Indicates that `batch_shape == []`. Args: name: Python `str` prepended to names of ops created by this function. Returns: is_scalar_batch: `bool` scalar `Tensor`. """ with self._name_scope(name): return ops.convert_to_tensor( self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), name="is_scalar_batch") def _sample_n(self, n, seed=None): raise NotImplementedError("sample_n is not implemented") def _call_sample_n(self, sample_shape, seed, name, **kwargs): with self._name_scope(name, values=[sample_shape]): sample_shape = ops.convert_to_tensor( sample_shape, dtype=dtypes.int32, name="sample_shape") sample_shape, n = self._expand_sample_shape_to_vector( sample_shape, "sample_shape") samples = self._sample_n(n, seed, **kwargs) batch_event_shape = array_ops.shape(samples)[1:] final_shape = array_ops.concat([sample_shape, batch_event_shape], 0) samples = array_ops.reshape(samples, final_shape) samples = self._set_sample_static_shape(samples, sample_shape) return samples def sample(self, sample_shape=(), seed=None, name="sample"): """Generate samples of the specified shape. Note that a call to `sample()` without arguments will generate a single sample. Args: sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. seed: Python integer seed for RNG name: name to give to the op. Returns: samples: a `Tensor` with prepended dimensions `sample_shape`. 
""" return self._call_sample_n(sample_shape, seed, name) def _log_prob(self, value): raise NotImplementedError("log_prob is not implemented") def _call_log_prob(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") try: return self._log_prob(value, **kwargs) except NotImplementedError: return math_ops.log(self._prob(value, **kwargs)) def log_prob(self, value, name="log_prob"): """Log probability density/mass function. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_log_prob(value, name) def _prob(self, value): raise NotImplementedError("prob is not implemented") def _call_prob(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") try: return self._prob(value, **kwargs) except NotImplementedError: return math_ops.exp(self._log_prob(value, **kwargs)) def prob(self, value, name="prob"): """Probability density/mass function. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_prob(value, name) def _log_cdf(self, value): raise NotImplementedError("log_cdf is not implemented") def _call_log_cdf(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") try: return self._log_cdf(value, **kwargs) except NotImplementedError: return math_ops.log(self._cdf(value, **kwargs)) def log_cdf(self, value, name="log_cdf"): """Log cumulative distribution function. Given random variable `X`, the cumulative distribution function `cdf` is: ```none log_cdf(x) := Log[ P[X <= x] ] ``` Often, a numerical approximation can be used for `log_cdf(x)` that yields a more accurate answer than simply taking the logarithm of the `cdf` when `x << -1`. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_log_cdf(value, name) def _cdf(self, value): raise NotImplementedError("cdf is not implemented") def _call_cdf(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") try: return self._cdf(value, **kwargs) except NotImplementedError: return math_ops.exp(self._log_cdf(value, **kwargs)) def cdf(self, value, name="cdf"): """Cumulative distribution function. Given random variable `X`, the cumulative distribution function `cdf` is: ```none cdf(x) := P[X <= x] ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. 
""" return self._call_cdf(value, name) def _log_survival_function(self, value): raise NotImplementedError("log_survival_function is not implemented") def _call_log_survival_function(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") try: return self._log_survival_function(value, **kwargs) except NotImplementedError: return math_ops.log1p(-self.cdf(value, **kwargs)) def log_survival_function(self, value, name="log_survival_function"): """Log survival function. Given random variable `X`, the survival function is defined: ```none log_survival_function(x) = Log[ P[X > x] ] = Log[ 1 - P[X <= x] ] = Log[ 1 - cdf(x) ] ``` Typically, different numerical approximations can be used for the log survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_log_survival_function(value, name) def _survival_function(self, value): raise NotImplementedError("survival_function is not implemented") def _call_survival_function(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") try: return self._survival_function(value, **kwargs) except NotImplementedError: return 1. - self.cdf(value, **kwargs) def survival_function(self, value, name="survival_function"): """Survival function. Given random variable `X`, the survival function is defined: ```none survival_function(x) = P[X > x] = 1 - P[X <= x] = 1 - cdf(x). ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_survival_function(value, name) def _entropy(self): raise NotImplementedError("entropy is not implemented") def entropy(self, name="entropy"): """Shannon entropy in nats.""" with self._name_scope(name): return self._entropy() def _mean(self): raise NotImplementedError("mean is not implemented") def mean(self, name="mean"): """Mean.""" with self._name_scope(name): return self._mean() def _quantile(self, value): raise NotImplementedError("quantile is not implemented") def _call_quantile(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = ops.convert_to_tensor(value, name="value") return self._quantile(value, **kwargs) def quantile(self, value, name="quantile"): """Quantile function. Aka "inverse cdf" or "percent point function". Given random variable `X` and `p in [0, 1]`, the `quantile` is: ```none quantile(p) := x such that P[X <= x] == p ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_quantile(value, name) def _variance(self): raise NotImplementedError("variance is not implemented") def variance(self, name="variance"): """Variance. Variance is defined as, ```none Var = E[(X - E[X])**2] ``` where `X` is the random variable associated with this distribution, `E` denotes expectation, and `Var.shape = batch_shape + event_shape`. Args: name: Python `str` prepended to names of ops created by this function. 
    Returns:
      variance: Floating-point `Tensor` with shape identical to
        `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
    """
    with self._name_scope(name):
      try:
        return self._variance()
      except NotImplementedError:
        return math_ops.square(self._stddev())

  def _stddev(self):
    raise NotImplementedError("stddev is not implemented")

  def stddev(self, name="stddev"):
    """Standard deviation.

    Standard deviation is defined as,

    ```none
    stddev = E[(X - E[X])**2]**0.5
    ```

    where `X` is the random variable associated with this distribution, `E`
    denotes expectation, and `stddev.shape = batch_shape + event_shape`.

    Args:
      name: Python `str` prepended to names of ops created by this function.

    Returns:
      stddev: Floating-point `Tensor` with shape identical to
        `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
    """
    with self._name_scope(name):
      try:
        return self._stddev()
      except NotImplementedError:
        return math_ops.sqrt(self._variance())

  def _covariance(self):
    raise NotImplementedError("covariance is not implemented")

  def covariance(self, name="covariance"):
    """Covariance.

    Covariance is (possibly) defined only for non-scalar-event distributions.

    For example, for a length-`k`, vector-valued distribution, it is calculated
    as,

    ```none
    Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
    ```

    where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
    denotes expectation.

    Alternatively, for non-vector, multivariate distributions (e.g.,
    matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
    under some vectorization of the events, i.e.,

    ```none
    Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
    ```

    where `Cov` is a (batch of) `k' x k'` matrices,
    `0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
    mapping indices of this distribution's event dimensions to indices of a
    length-`k'` vector.

    Args:
      name: Python `str` prepended to names of ops created by this function.

    Returns:
      covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
        where the first `n` dimensions are batch coordinates and
        `k' = reduce_prod(self.event_shape)`.
    """
    with self._name_scope(name):
      return self._covariance()

  def _mode(self):
    raise NotImplementedError("mode is not implemented")

  def mode(self, name="mode"):
    """Mode."""
    with self._name_scope(name):
      return self._mode()

  def _cross_entropy(self, other):
    return kullback_leibler.cross_entropy(
        self, other, allow_nan_stats=self.allow_nan_stats)

  def cross_entropy(self, other, name="cross_entropy"):
    """Computes the (Shannon) cross entropy.

    Denote this distribution (`self`) by `P` and the `other` distribution by
    `Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
    cross entropy is defined as:

    ```none
    H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
    ```

    where `F` denotes the support of the random variable `X ~ P`.

    Args:
      other: `tf.distributions.Distribution` instance.
      name: Python `str` prepended to names of ops created by this function.

    Returns:
      cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
    """
    with self._name_scope(name):
      return self._cross_entropy(other)

  def _kl_divergence(self, other):
    return kullback_leibler.kl_divergence(
        self, other, allow_nan_stats=self.allow_nan_stats)

  def kl_divergence(self, other, name="kl_divergence"):
    """Computes the Kullback--Leibler divergence.
    Denote this distribution (`self`) by `p` and the `other` distribution by
    `q`. Assuming `p, q` are absolutely continuous with respect to reference
    measure `r`, the KL divergence is defined as:

    ```none
    KL[p, q] = E_p[log(p(X)/q(X))]
             = -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
             = H[p, q] - H[p]
    ```

    where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.

    Args:
      other: `tf.distributions.Distribution` instance.
      name: Python `str` prepended to names of ops created by this function.

    Returns:
      kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of the Kullback-Leibler
        divergence.
    """
    with self._name_scope(name):
      return self._kl_divergence(other)

  def __str__(self):
    return ("tf.distributions.{type_name}("
            "\"{self_name}\""
            "{maybe_batch_shape}"
            "{maybe_event_shape}"
            ", dtype={dtype})".format(
                type_name=type(self).__name__,
                self_name=self.name,
                maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
                                   if self.batch_shape.ndims is not None
                                   else ""),
                maybe_event_shape=(", event_shape={}".format(self.event_shape)
                                   if self.event_shape.ndims is not None
                                   else ""),
                dtype=self.dtype.name))

  def __repr__(self):
    return ("<tf.distributions.{type_name} "
            "'{self_name}'"
            " batch_shape={batch_shape}"
            " event_shape={event_shape}"
            " dtype={dtype}>".format(
                type_name=type(self).__name__,
                self_name=self.name,
                batch_shape=self.batch_shape,
                event_shape=self.event_shape,
                dtype=self.dtype.name))

  @contextlib.contextmanager
  def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope."""
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=(
          ([] if values is None else values) + self._graph_parents)) as scope:
        yield scope

  def _expand_sample_shape_to_vector(self, x, name):
    """Helper to `sample` which ensures input is 1D."""
    x_static_val = tensor_util.constant_value(x)
    if x_static_val is None:
      prod = math_ops.reduce_prod(x)
    else:
      prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())

    ndims = x.get_shape().ndims  # != sample_ndims
    if ndims is None:
      # Maybe expand_dims.
      ndims = array_ops.rank(x)
      expanded_shape = util.pick_vector(
          math_ops.equal(ndims, 0),
          np.array([1], dtype=np.int32), array_ops.shape(x))
      x = array_ops.reshape(x, expanded_shape)
    elif ndims == 0:
      # Definitely expand_dims.
      if x_static_val is not None:
        x = ops.convert_to_tensor(
            np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
            name=name)
      else:
        x = array_ops.reshape(x, [1])
    elif ndims != 1:
      raise ValueError("Input is neither scalar nor vector.")

    return x, prod

  def _set_sample_static_shape(self, x, sample_shape):
    """Helper to `sample`; sets static shape info."""
    # Set shape hints.
    sample_shape = tensor_shape.TensorShape(
        tensor_util.constant_value(sample_shape))

    ndims = x.get_shape().ndims
    sample_ndims = sample_shape.ndims
    batch_ndims = self.batch_shape.ndims
    event_ndims = self.event_shape.ndims

    # Infer rank(x).
    if (ndims is None and
        sample_ndims is not None and
        batch_ndims is not None and
        event_ndims is not None):
      ndims = sample_ndims + batch_ndims + event_ndims
      x.set_shape([None] * ndims)

    # Infer sample shape.
    if ndims is not None and sample_ndims is not None:
      shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
      x.set_shape(x.get_shape().merge_with(shape))

    # Infer event shape.
if ndims is not None and event_ndims is not None: shape = tensor_shape.TensorShape( [None]*(ndims - event_ndims)).concatenate(self.event_shape) x.set_shape(x.get_shape().merge_with(shape)) # Infer batch shape. if batch_ndims is not None: if ndims is not None: if sample_ndims is None and event_ndims is not None: sample_ndims = ndims - batch_ndims - event_ndims elif event_ndims is None and sample_ndims is not None: event_ndims = ndims - batch_ndims - sample_ndims if sample_ndims is not None and event_ndims is not None: shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate( self.batch_shape).concatenate([None]*event_ndims) x.set_shape(x.get_shape().merge_with(shape)) return x def _is_scalar_helper(self, static_shape, dynamic_shape_fn): """Implementation for `is_scalar_batch` and `is_scalar_event`.""" if static_shape.ndims is not None: return static_shape.ndims == 0 shape = dynamic_shape_fn() if (shape.get_shape().ndims is not None and shape.get_shape()[0].value is not None): # If the static_shape is correctly written then we should never execute # this branch. We keep it just in case there's some unimagined corner # case. return shape.get_shape().as_list() == [0] return math_ops.equal(array_ops.shape(shape)[0], 0)
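
# Hedged illustration (not part of the original file): the `Distribution`
# docstring above says subclasses implement leading-underscore variants of
# the public methods (`_log_prob`, `_sample_n`, ...). Below is a minimal
# sketch of that contract for a scalar Uniform(0, 1); the class name
# `_UnitUniform` is hypothetical and exists only to show the pattern.
from tensorflow.python.ops import random_ops  # used by the sketch below


class _UnitUniform(Distribution):
  """Uniform(0, 1), written only to illustrate the subclassing API."""

  def __init__(self, name="UnitUniform"):
    super(_UnitUniform, self).__init__(
        dtype=dtypes.float32,
        reparameterization_type=FULLY_REPARAMETERIZED,
        validate_args=False,
        allow_nan_stats=True,
        name=name)

  def _batch_shape(self):
    return tensor_shape.TensorShape([])

  def _event_shape(self):
    return tensor_shape.TensorShape([])

  def _sample_n(self, n, seed=None):
    # Subclasses return shape [n] + batch_shape + event_shape; the public
    # `sample` wrapper reshapes to the user-requested sample_shape.
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    return random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)

  def _log_prob(self, value):
    # log p(x) = 0 on the support; `prob` falls back to exp(_log_prob).
    return array_ops.zeros_like(value)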
benoitsteiner/tensorflow-xsmm
tensorflow/python/ops/distributions/distribution.py
Python
apache-2.0
43,119
[ "Gaussian" ]
9ef53f7040b8af43ef00424b99a7aa168c6862e2af21cce76b89138b590d813c
#!/usr/bin/python
# -*- coding: utf8 -*-
import argparse
import os
import subprocess
import sys

parser = argparse.ArgumentParser(description='Animate ParaView data')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-l','--layout', help="ParaView state file (.pvsm-file); gained via 'Save State...' in ParaView", required=True)
optional.add_argument('-r','--reader', help="Path to custom reader plugin (e.g. 'path/to/libvisuReader.so'), required for .h5 files")
optional.add_argument('-n','--nomovie', help='Only generate pictures', action='store_true')
optional.add_argument('-f','--fps', type=int, default=10, help='Frames per second')
optional.add_argument('-b','--bitrate', type=int, default=10000, help='Bitrate of movie')
optional.add_argument('-m','--mpi', type=int, default=1, help='Number of MPI procs for rendering')
optional.add_argument('-s','--scale', type=int, default=1, help='Magnification of rendering')
optional.add_argument('-o','--output', default='', help='Appendix for filenames')
optional.add_argument('-x','--folder', default='', help='relative output folder for images and movies')
required.add_argument('plotfiles', nargs='+', help='Files to animate (.vtu/.pvtu-files)')
parser._action_groups.append(optional)
args = parser.parse_args()

# if output folder does not exist, create it
#cwd = os.getcwd()
fp = os.path.join(os.getcwd(), args.folder)
if not os.path.exists(fp):
    os.makedirs(fp)

plotfiles = [f for f in args.plotfiles if (os.path.splitext(f)[1] in ['.pvtu', '.vtu', '.plt', '.vtm', '.h5'])]

has_h5_plotfiles = any([(os.path.splitext(f)[1] == '.h5') for f in plotfiles])
if has_h5_plotfiles and not args.reader:
    sys.exit("Please specify path to reader plugin (e.g. '-r path/to/libvisuReader.so') if input is HDF5!")

i = 0
for p in plotfiles:
    i = i + 1
    sys.stdout.write('\r%05.2f %% Animate: %s' % (100.0 * i / len(plotfiles), p))
    sys.stdout.flush()
    fn = os.path.splitext(p)[0] + args.output + '.py'
    f = open(fn, 'w')
    # get filename
    of = os.path.splitext(p)[0] + args.output + '.png'
    of2 = os.path.basename(of)
    # create output filename
    of = os.path.join(os.getcwd(), args.folder, of2)
    f.write("""from paraview.simple import *
import os
paraview.simple._DisableFirstRenderCameraReset()
""")
    if args.reader:
        f.write("servermanager.LoadPlugin('%s')\n" % (args.reader))
    f.write("""servermanager.LoadState('%s')
statefilename = GetSources()
plotfilename = None
for k in statefilename.keys() :
    if os.path.splitext(k[0])[1] in ['.pvtu', '.vtu', '.plt', '.vtm', '.h5'] :
        plotfilename = k[0]
        break
if not plotfilename : exit(1)
reader = FindSource(plotfilename)
reader.FileName = ['%s']
reader.FileNameChanged()
RenderView1 = GetRenderView()
if RenderView1.InteractionMode == "2D" :
    RenderView1.CameraParallelProjection=1
SetActiveView(RenderView1)
Render()
WriteImage('%s', Magnification=%d)
""" % (args.layout, p, of, args.scale))
    f.close()
    if args.mpi > 1:
        cmd = ['mpirun', '-np', str(args.mpi), 'pvbatch', '--use-offscreen-rendering', fn]
    else:
        cmd = ['pvbatch', '--use-offscreen-rendering', fn]
    # 'proc' avoids shadowing the loop variable 'p' (the current plot file)
    proc = subprocess.Popen(cmd)
    proc.wait()
    os.remove(fn)
sys.stdout.write('\n')

if not args.nomovie:
    print('Generate movie ....')
    cmd = ['mencoder']
    cmd.append('mf://%s/*%s.png' % (fp, args.output))
    cmd.append('-mf')
    cmd.append('fps=%d' % args.fps)
    cmd.append('-o')
    cmd.append('%s' % os.path.join(fp, os.path.basename(os.path.splitext(args.layout)[0])) + '.avi')
    cmd.append('-ovc')
    cmd.append('lavc')
    cmd.append('-lavcopts')
    cmd.append('vcodec=msmpeg4v2:vbitrate=%d' % args.bitrate)
    proc = subprocess.Popen(cmd)
    proc.wait()
    print('done')
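
# Hedged usage sketch (not part of the original script); file names and the
# process count below are placeholders:
#
#   python animate_paraview.py -l state.pvsm -m 4 -s 2 -f 25 sol_*.pvtu
#
# renders every matching plot file through 4 pvbatch ranks at 2x
# magnification, then stitches the frames into a 25 fps movie with the
# mencoder command assembled above.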
flexi-framework/flexi
tools/animate/animate_paraview.py
Python
gpl-3.0
3,859
[ "ParaView" ]
c5abe74dc03e2a4024921e18c1ca1fba8f228f5e74c5672e0c9d1fb82249b565
"""Command line options, ini-file and conftest.py processing.""" import argparse import collections.abc import contextlib import copy import enum import inspect import os import re import shlex import sys import types import warnings from functools import lru_cache from pathlib import Path from types import TracebackType from typing import Any from typing import Callable from typing import Dict from typing import Generator from typing import IO from typing import Iterable from typing import Iterator from typing import List from typing import Optional from typing import Sequence from typing import Set from typing import TextIO from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union import attr import py from pluggy import HookimplMarker from pluggy import HookspecMarker from pluggy import PluginManager import _pytest._code import _pytest.deprecated import _pytest.hookspec from .exceptions import PrintHelp as PrintHelp from .exceptions import UsageError as UsageError from .findpaths import determine_setup from _pytest._code import ExceptionInfo from _pytest._code import filter_traceback from _pytest._io import TerminalWriter from _pytest.compat import final from _pytest.compat import importlib_metadata from _pytest.outcomes import fail from _pytest.outcomes import Skipped from _pytest.pathlib import bestrelpath from _pytest.pathlib import import_path from _pytest.pathlib import ImportMode from _pytest.store import Store from _pytest.warning_types import PytestConfigWarning if TYPE_CHECKING: from _pytest._code.code import _TracebackStyle from _pytest.terminal import TerminalReporter from .argparsing import Argument _PluggyPlugin = object """A type to represent plugin objects. Plugins can be any namespace, so we can't narrow it down much, but we use an alias to make the intent clear. Ideally this type would be provided by pluggy itself. """ hookimpl = HookimplMarker("pytest") hookspec = HookspecMarker("pytest") @final class ExitCode(enum.IntEnum): """Encodes the valid exit codes by pytest. Currently users and plugins may supply other exit codes as well. .. versionadded:: 5.0 """ #: Tests passed. OK = 0 #: Tests failed. TESTS_FAILED = 1 #: pytest was interrupted. INTERRUPTED = 2 #: An internal error got in the way. INTERNAL_ERROR = 3 #: pytest was misused. USAGE_ERROR = 4 #: pytest couldn't find tests. NO_TESTS_COLLECTED = 5 class ConftestImportFailure(Exception): def __init__( self, path: py.path.local, excinfo: Tuple[Type[Exception], Exception, TracebackType], ) -> None: super().__init__(path, excinfo) self.path = path self.excinfo = excinfo def __str__(self) -> str: return "{}: {} (from {})".format( self.excinfo[0].__name__, self.excinfo[1], self.path ) def filter_traceback_for_conftest_import_failure( entry: _pytest._code.TracebackEntry, ) -> bool: """Filter tracebacks entries which point to pytest internals or importlib. Make a special case for importlib because we use it to import test modules and conftest files in _pytest.pathlib.import_path. """ return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) def main( args: Optional[Union[List[str], py.path.local]] = None, plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, ) -> Union[int, ExitCode]: """Perform an in-process test run. :param args: List of command line arguments. :param plugins: List of plugin objects to be auto-registered during initialization. :returns: An exit code. 
""" try: try: config = _prepareconfig(args, plugins) except ConftestImportFailure as e: exc_info = ExceptionInfo(e.excinfo) tw = TerminalWriter(sys.stderr) tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) exc_info.traceback = exc_info.traceback.filter( filter_traceback_for_conftest_import_failure ) exc_repr = ( exc_info.getrepr(style="short", chain=False) if exc_info.traceback else exc_info.exconly() ) formatted_tb = str(exc_repr) for line in formatted_tb.splitlines(): tw.line(line.rstrip(), red=True) return ExitCode.USAGE_ERROR else: try: ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( config=config ) try: return ExitCode(ret) except ValueError: return ret finally: config._ensure_unconfigure() except UsageError as e: tw = TerminalWriter(sys.stderr) for msg in e.args: tw.line(f"ERROR: {msg}\n", red=True) return ExitCode.USAGE_ERROR def console_main() -> int: """The CLI entry point of pytest. This function is not meant for programmable use; use `main()` instead. """ # https://docs.python.org/3/library/signal.html#note-on-sigpipe try: code = main() sys.stdout.flush() return code except BrokenPipeError: # Python flushes standard streams on exit; redirect remaining output # to devnull to avoid another BrokenPipeError at shutdown devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, sys.stdout.fileno()) return 1 # Python exits with error code 1 on EPIPE class cmdline: # compatibility namespace main = staticmethod(main) def filename_arg(path: str, optname: str) -> str: """Argparse type validator for filename arguments. :path: Path of filename. :optname: Name of the option. """ if os.path.isdir(path): raise UsageError(f"{optname} must be a filename, given: {path}") return path def directory_arg(path: str, optname: str) -> str: """Argparse type validator for directory arguments. :path: Path of directory. :optname: Name of the option. """ if not os.path.isdir(path): raise UsageError(f"{optname} must be a directory, given: {path}") return path # Plugins that cannot be disabled via "-p no:X" currently. essential_plugins = ( "mark", "main", "runner", "fixtures", "helpconfig", # Provides -p. ) default_plugins = essential_plugins + ( "python", "terminal", "debugging", "unittest", "capture", "skipping", "tmpdir", "monkeypatch", "recwarn", "pastebin", "nose", "assertion", "junitxml", "doctest", "cacheprovider", "freeze_support", "setuponly", "setupplan", "stepwise", "warnings", "logging", "reports", *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []), "faulthandler", ) builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") builtin_plugins.add("pytester_assertions") def get_config( args: Optional[List[str]] = None, plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, ) -> "Config": # subsequent calls to main will create a fresh instance pluginmanager = PytestPluginManager() config = Config( pluginmanager, invocation_params=Config.InvocationParams( args=args or (), plugins=plugins, dir=Path.cwd(), ), ) if args is not None: # Handle any "-p no:plugin" args. pluginmanager.consider_preparse(args, exclude_only=True) for spec in default_plugins: pluginmanager.import_plugin(spec) return config def get_plugin_manager() -> "PytestPluginManager": """Obtain a new instance of the :py:class:`_pytest.config.PytestPluginManager`, with default plugins already loaded. This function can be used by integration with other tools, like hooking into pytest to run tests into an IDE. 
""" return get_config().pluginmanager def _prepareconfig( args: Optional[Union[py.path.local, List[str]]] = None, plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, ) -> "Config": if args is None: args = sys.argv[1:] elif isinstance(args, py.path.local): args = [str(args)] elif not isinstance(args, list): msg = "`args` parameter expected to be a list of strings, got: {!r} (type: {})" raise TypeError(msg.format(args, type(args))) config = get_config(args, plugins) pluginmanager = config.pluginmanager try: if plugins: for plugin in plugins: if isinstance(plugin, str): pluginmanager.consider_pluginarg(plugin) else: pluginmanager.register(plugin) config = pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args ) return config except BaseException: config._ensure_unconfigure() raise @final class PytestPluginManager(PluginManager): """A :py:class:`pluggy.PluginManager <pluggy.PluginManager>` with additional pytest-specific functionality: * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and ``pytest_plugins`` global variables found in plugins being loaded. * ``conftest.py`` loading during start-up. """ def __init__(self) -> None: import _pytest.assertion super().__init__("pytest") # The objects are module objects, only used generically. self._conftest_plugins: Set[types.ModuleType] = set() # State related to local conftest plugins. self._dirpath2confmods: Dict[py.path.local, List[types.ModuleType]] = {} self._conftestpath2mod: Dict[Path, types.ModuleType] = {} self._confcutdir: Optional[py.path.local] = None self._noconftest = False self._duplicatepaths: Set[py.path.local] = set() # plugins that were explicitly skipped with pytest.skip # list of (module name, skip reason) # previously we would issue a warning when a plugin was skipped, but # since we refactored warnings as first citizens of Config, they are # just stored here to be used later. self.skipped_plugins: List[Tuple[str, str]] = [] self.add_hookspecs(_pytest.hookspec) self.register(self) if os.environ.get("PYTEST_DEBUG"): err: IO[str] = sys.stderr encoding: str = getattr(err, "encoding", "utf8") try: err = open( os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding, ) except Exception: pass self.trace.root.setwriter(err.write) self.enable_tracing() # Config._consider_importhook will set a real object if required. self.rewrite_hook = _pytest.assertion.DummyRewriteHook() # Used to know when we are importing conftests after the pytest_configure stage. self._configured = False def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str): # pytest hooks are always prefixed with "pytest_", # so we avoid accessing possibly non-readable attributes # (see issue #1073). if not name.startswith("pytest_"): return # Ignore names which can not be hooks. if name == "pytest_plugins": return method = getattr(plugin, name) opts = super().parse_hookimpl_opts(plugin, name) # Consider only actual functions for hooks (#3775). if not inspect.isroutine(method): return # Collect unmarked hooks as long as they have the `pytest_' prefix. 
if opts is None and name.startswith("pytest_"): opts = {} if opts is not None: # TODO: DeprecationWarning, people should use hookimpl # https://github.com/pytest-dev/pytest/issues/4562 known_marks = {m.name for m in getattr(method, "pytestmark", [])} for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): opts.setdefault(name, hasattr(method, name) or name in known_marks) return opts def parse_hookspec_opts(self, module_or_class, name: str): opts = super().parse_hookspec_opts(module_or_class, name) if opts is None: method = getattr(module_or_class, name) if name.startswith("pytest_"): # todo: deprecate hookspec hacks # https://github.com/pytest-dev/pytest/issues/4562 known_marks = {m.name for m in getattr(method, "pytestmark", [])} opts = { "firstresult": hasattr(method, "firstresult") or "firstresult" in known_marks, "historic": hasattr(method, "historic") or "historic" in known_marks, } return opts def register( self, plugin: _PluggyPlugin, name: Optional[str] = None ) -> Optional[str]: if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: warnings.warn( PytestConfigWarning( "{} plugin has been merged into the core, " "please remove it from your requirements.".format( name.replace("_", "-") ) ) ) return None ret: Optional[str] = super().register(plugin, name) if ret: self.hook.pytest_plugin_registered.call_historic( kwargs=dict(plugin=plugin, manager=self) ) if isinstance(plugin, types.ModuleType): self.consider_module(plugin) return ret def getplugin(self, name: str): # Support deprecated naming because plugins (xdist e.g.) use it. plugin: Optional[_PluggyPlugin] = self.get_plugin(name) return plugin def hasplugin(self, name: str) -> bool: """Return whether a plugin with the given name is registered.""" return bool(self.get_plugin(name)) def pytest_configure(self, config: "Config") -> None: """:meta private:""" # XXX now that the pluginmanager exposes hookimpl(tryfirst...) # we should remove tryfirst/trylast as markers. config.addinivalue_line( "markers", "tryfirst: mark a hook implementation function such that the " "plugin machinery will try to call it first/as early as possible.", ) config.addinivalue_line( "markers", "trylast: mark a hook implementation function such that the " "plugin machinery will try to call it last/as late as possible.", ) self._configured = True # # Internal API for local conftest plugin handling. # def _set_initial_conftests(self, namespace: argparse.Namespace) -> None: """Load initial conftest files given a preparsed "namespace". As conftest files may add their own command line options which have arguments ('--my-opt somepath') we might get some false positives. All builtin and 3rd party plugins will have been loaded, however, so common options will not confuse our logic here. 
""" current = py.path.local() self._confcutdir = ( current.join(namespace.confcutdir, abs=True) if namespace.confcutdir else None ) self._noconftest = namespace.noconftest self._using_pyargs = namespace.pyargs testpaths = namespace.file_or_dir foundanchor = False for testpath in testpaths: path = str(testpath) # remove node-id syntax i = path.find("::") if i != -1: path = path[:i] anchor = current.join(path, abs=1) if anchor.exists(): # we found some file object self._try_load_conftest(anchor, namespace.importmode) foundanchor = True if not foundanchor: self._try_load_conftest(current, namespace.importmode) def _try_load_conftest( self, anchor: py.path.local, importmode: Union[str, ImportMode] ) -> None: self._getconftestmodules(anchor, importmode) # let's also consider test* subdirs if anchor.check(dir=1): for x in anchor.listdir("test*"): if x.check(dir=1): self._getconftestmodules(x, importmode) @lru_cache(maxsize=128) def _getconftestmodules( self, path: py.path.local, importmode: Union[str, ImportMode], ) -> List[types.ModuleType]: if self._noconftest: return [] if path.isfile(): directory = path.dirpath() else: directory = path # XXX these days we may rather want to use config.rootpath # and allow users to opt into looking into the rootdir parent # directories instead of requiring to specify confcutdir. clist = [] for parent in directory.parts(): if self._confcutdir and self._confcutdir.relto(parent): continue conftestpath = parent.join("conftest.py") if conftestpath.isfile(): mod = self._importconftest(conftestpath, importmode) clist.append(mod) self._dirpath2confmods[directory] = clist return clist def _rget_with_confmod( self, name: str, path: py.path.local, importmode: Union[str, ImportMode], ) -> Tuple[types.ModuleType, Any]: modules = self._getconftestmodules(path, importmode) for mod in reversed(modules): try: return mod, getattr(mod, name) except AttributeError: continue raise KeyError(name) def _importconftest( self, conftestpath: py.path.local, importmode: Union[str, ImportMode], ) -> types.ModuleType: # Use a resolved Path object as key to avoid loading the same conftest # twice with build systems that create build directories containing # symlinks to actual files. 
# Using Path().resolve() is better than py.path.realpath because # it resolves to the correct path/drive in case-insensitive file systems (#5792) key = Path(str(conftestpath)).resolve() with contextlib.suppress(KeyError): return self._conftestpath2mod[key] pkgpath = conftestpath.pypkgpath() if pkgpath is None: _ensure_removed_sysmodule(conftestpath.purebasename) try: mod = import_path(conftestpath, mode=importmode) except Exception as e: assert e.__traceback__ is not None exc_info = (type(e), e, e.__traceback__) raise ConftestImportFailure(conftestpath, exc_info) from e self._check_non_top_pytest_plugins(mod, conftestpath) self._conftest_plugins.add(mod) self._conftestpath2mod[key] = mod dirpath = conftestpath.dirpath() if dirpath in self._dirpath2confmods: for path, mods in self._dirpath2confmods.items(): if path and path.relto(dirpath) or path == dirpath: assert mod not in mods mods.append(mod) self.trace(f"loading conftestmodule {mod!r}") self.consider_conftest(mod) return mod def _check_non_top_pytest_plugins( self, mod: types.ModuleType, conftestpath: py.path.local, ) -> None: if ( hasattr(mod, "pytest_plugins") and self._configured and not self._using_pyargs ): msg = ( "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" "It affects the entire test suite instead of just below the conftest as expected.\n" " {}\n" "Please move it to a top level conftest file at the rootdir:\n" " {}\n" "For more information, visit:\n" " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" ) fail(msg.format(conftestpath, self._confcutdir), pytrace=False) # # API for bootstrapping plugin loading # # def consider_preparse( self, args: Sequence[str], *, exclude_only: bool = False ) -> None: i = 0 n = len(args) while i < n: opt = args[i] i += 1 if isinstance(opt, str): if opt == "-p": try: parg = args[i] except IndexError: return i += 1 elif opt.startswith("-p"): parg = opt[2:] else: continue if exclude_only and not parg.startswith("no:"): continue self.consider_pluginarg(parg) def consider_pluginarg(self, arg: str) -> None: if arg.startswith("no:"): name = arg[3:] if name in essential_plugins: raise UsageError("plugin %s cannot be disabled" % name) # PR #4304: remove stepwise if cacheprovider is blocked. if name == "cacheprovider": self.set_blocked("stepwise") self.set_blocked("pytest_stepwise") self.set_blocked(name) if not name.startswith("pytest_"): self.set_blocked("pytest_" + name) else: name = arg # Unblock the plugin. None indicates that it has been blocked. # There is no interface with pluggy for this. if self._name2plugin.get(name, -1) is None: del self._name2plugin[name] if not name.startswith("pytest_"): if self._name2plugin.get("pytest_" + name, -1) is None: del self._name2plugin["pytest_" + name] self.import_plugin(arg, consider_entry_points=True) def consider_conftest(self, conftestmodule: types.ModuleType) -> None: self.register(conftestmodule, name=conftestmodule.__file__) def consider_env(self) -> None: self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) def consider_module(self, mod: types.ModuleType) -> None: self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) def _import_plugin_specs( self, spec: Union[None, types.ModuleType, str, Sequence[str]] ) -> None: plugins = _get_plugin_specs_as_list(spec) for import_spec in plugins: self.import_plugin(import_spec) def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: """Import a plugin with ``modname``. 
        If ``consider_entry_points`` is True, entry point names are also
        considered to find a plugin.
        """
        # Most often modname refers to builtin modules, e.g. "pytester",
        # "terminal" or "capture".  Those plugins are registered under their
        # basename for historic purposes but must be imported with the
        # _pytest prefix.
        assert isinstance(modname, str), (
            "module name as text required, got %r" % modname
        )
        if self.is_blocked(modname) or self.get_plugin(modname) is not None:
            return

        importspec = "_pytest." + modname if modname in builtin_plugins else modname
        self.rewrite_hook.mark_rewrite(importspec)

        if consider_entry_points:
            loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
            if loaded:
                return

        try:
            __import__(importspec)
        except ImportError as e:
            raise ImportError(
                'Error importing plugin "{}": {}'.format(modname, str(e.args[0]))
            ).with_traceback(e.__traceback__) from e

        except Skipped as e:
            self.skipped_plugins.append((modname, e.msg or ""))
        else:
            mod = sys.modules[importspec]
            self.register(mod, modname)


def _get_plugin_specs_as_list(
    specs: Union[None, types.ModuleType, str, Sequence[str]]
) -> List[str]:
    """Parse a plugins specification into a list of plugin names."""
    # None means empty.
    if specs is None:
        return []
    # Workaround for #3899 - a submodule which happens to be called "pytest_plugins".
    if isinstance(specs, types.ModuleType):
        return []
    # Comma-separated list.
    if isinstance(specs, str):
        return specs.split(",") if specs else []
    # Direct specification.
    if isinstance(specs, collections.abc.Sequence):
        return list(specs)
    raise UsageError(
        "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r"
        % specs
    )


def _ensure_removed_sysmodule(modname: str) -> None:
    try:
        del sys.modules[modname]
    except KeyError:
        pass


class Notset:
    def __repr__(self):
        return "<NOTSET>"


notset = Notset()


def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
    """Given an iterable of file names in a source distribution, return the
    "names" that should be marked for assertion rewrite.

    For example the package "pytest_mock/__init__.py" should be added as
    "pytest_mock" in the assertion rewrite mechanism.

    This function has to deal with dist-info based distributions and egg based
    distributions (which are still very much in use for "editable" installs).

    Here are the file names as seen in a dist-info based distribution:

        pytest_mock/__init__.py
        pytest_mock/_version.py
        pytest_mock/plugin.py
        pytest_mock.egg-info/PKG-INFO

    Here are the file names as seen in an egg based distribution:

        src/pytest_mock/__init__.py
        src/pytest_mock/_version.py
        src/pytest_mock/plugin.py
        src/pytest_mock.egg-info/PKG-INFO
        LICENSE
        setup.py

    We have to take into account those two distribution flavors in order to
    determine which names should be considered for assertion rewriting.
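
    For example (an illustrative doctest; the file names are hypothetical):

        >>> sorted(_iter_rewritable_modules([
        ...     "pytest_mock/__init__.py",
        ...     "pytest_mock/plugin.py",
        ...     "pytest_mock.egg-info/PKG-INFO",
        ... ]))
        ['pytest_mock']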
    More information:
    https://github.com/pytest-dev/pytest-mock/issues/167
    """
    package_files = list(package_files)
    seen_some = False
    for fn in package_files:
        is_simple_module = "/" not in fn and fn.endswith(".py")
        is_package = fn.count("/") == 1 and fn.endswith("__init__.py")
        if is_simple_module:
            module_name, _ = os.path.splitext(fn)
            # we ignore "setup.py" at the root of the distribution
            if module_name != "setup":
                seen_some = True
                yield module_name
        elif is_package:
            package_name = os.path.dirname(fn)
            seen_some = True
            yield package_name

    if not seen_some:
        # At this point we did not find any packages or modules suitable for assertion
        # rewriting, so we try again by stripping the first path component (to account for
        # "src" based source trees for example).
        # This approach lets us have the common case continue to be fast, as egg-distributions
        # are rarer.
        new_package_files = []
        for fn in package_files:
            parts = fn.split("/")
            new_fn = "/".join(parts[1:])
            if new_fn:
                new_package_files.append(new_fn)
        if new_package_files:
            yield from _iter_rewritable_modules(new_package_files)


def _args_converter(args: Iterable[str]) -> Tuple[str, ...]:
    return tuple(args)


@final
class Config:
    """Access to configuration values, pluginmanager and plugin hooks.

    :param PytestPluginManager pluginmanager:

    :param InvocationParams invocation_params:
        Object containing parameters regarding the :func:`pytest.main`
        invocation.
    """

    @final
    @attr.s(frozen=True)
    class InvocationParams:
        """Holds parameters passed during :func:`pytest.main`.

        The object attributes are read-only.

        .. versionadded:: 5.1

        .. note::

            Note that the environment variable ``PYTEST_ADDOPTS`` and the
            ``addopts`` ini option are handled by pytest, not being
            included in the ``args`` attribute.

            Plugins accessing ``InvocationParams`` must be aware of that.
        """

        args = attr.ib(type=Tuple[str, ...], converter=_args_converter)
        """The command-line arguments as passed to :func:`pytest.main`.

        :type: Tuple[str, ...]
        """
        plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]])
        """Extra plugins, might be `None`.

        :type: Optional[Sequence[Union[str, plugin]]]
        """
        dir = attr.ib(type=Path)
        """The directory from which :func:`pytest.main` was invoked.

        :type: pathlib.Path
        """

    def __init__(
        self,
        pluginmanager: PytestPluginManager,
        *,
        invocation_params: Optional[InvocationParams] = None,
    ) -> None:
        from .argparsing import Parser, FILE_OR_DIR

        if invocation_params is None:
            invocation_params = self.InvocationParams(
                args=(), plugins=None, dir=Path.cwd()
            )

        self.option = argparse.Namespace()
        """Access to command line option as attributes.

        :type: argparse.Namespace
        """

        self.invocation_params = invocation_params
        """The parameters with which pytest was invoked.

        :type: InvocationParams
        """

        _a = FILE_OR_DIR
        self._parser = Parser(
            usage=f"%(prog)s [options] [{_a}] [{_a}] [...]",
            processopt=self._processopt,
        )
        self.pluginmanager = pluginmanager
        """The plugin manager handles plugin registration and hook invocation.

        :type: PytestPluginManager
        """

        self.trace = self.pluginmanager.trace.root.get("config")
        self.hook = self.pluginmanager.hook
        self._inicache: Dict[str, Any] = {}
        self._override_ini: Sequence[str] = ()
        self._opt2dest: Dict[str, str] = {}
        self._cleanup: List[Callable[[], None]] = []
        # A place where plugins can store information on the config for their
        # own use. Currently only intended for internal plugins.
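        # (Illustrative, hypothetical key.) An internal plugin could write:
        #     key = StoreKey[int]()   # from _pytest.store
        #     config._store[key] = 42
        # and read the value back later via config._store[key].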
        self._store = Store()

        self.pluginmanager.register(self, "pytestconfig")
        self._configured = False
        self.hook.pytest_addoption.call_historic(
            kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager)
        )

        if TYPE_CHECKING:
            from _pytest.cacheprovider import Cache

            self.cache: Optional[Cache] = None

    @property
    def invocation_dir(self) -> py.path.local:
        """The directory from which pytest was invoked.

        Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
        which is a :class:`pathlib.Path`.

        :type: py.path.local
        """
        return py.path.local(str(self.invocation_params.dir))

    @property
    def rootpath(self) -> Path:
        """The path to the :ref:`rootdir <rootdir>`.

        :type: pathlib.Path

        .. versionadded:: 6.1
        """
        return self._rootpath

    @property
    def rootdir(self) -> py.path.local:
        """The path to the :ref:`rootdir <rootdir>`.

        Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.

        :type: py.path.local
        """
        return py.path.local(str(self.rootpath))

    @property
    def inipath(self) -> Optional[Path]:
        """The path to the :ref:`configfile <configfiles>`.

        :type: Optional[pathlib.Path]

        .. versionadded:: 6.1
        """
        return self._inipath

    @property
    def inifile(self) -> Optional[py.path.local]:
        """The path to the :ref:`configfile <configfiles>`.

        Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.

        :type: Optional[py.path.local]
        """
        return py.path.local(str(self.inipath)) if self.inipath else None

    def add_cleanup(self, func: Callable[[], None]) -> None:
        """Add a function to be called when the config object gets out of
        use (usually coinciding with pytest_unconfigure)."""
        self._cleanup.append(func)

    def _do_configure(self) -> None:
        assert not self._configured
        self._configured = True
        with warnings.catch_warnings():
            warnings.simplefilter("default")
            self.hook.pytest_configure.call_historic(kwargs=dict(config=self))

    def _ensure_unconfigure(self) -> None:
        if self._configured:
            self._configured = False
            self.hook.pytest_unconfigure(config=self)
            self.hook.pytest_configure._call_history = []
        while self._cleanup:
            fin = self._cleanup.pop()
            fin()

    def get_terminal_writer(self) -> TerminalWriter:
        terminalreporter: TerminalReporter = self.pluginmanager.get_plugin(
            "terminalreporter"
        )
        return terminalreporter._tw

    def pytest_cmdline_parse(
        self, pluginmanager: PytestPluginManager, args: List[str]
    ) -> "Config":
        try:
            self.parse(args)
        except UsageError:
            # Handle --version and --help here in a minimal fashion.
            # This gets done via helpconfig normally, but its
            # pytest_cmdline_main is not called in case of errors.
            if getattr(self.option, "version", False) or "--version" in args:
                from _pytest.helpconfig import showversion

                showversion(self)
            elif (
                getattr(self.option, "help", False) or "--help" in args or "-h" in args
            ):
                self._parser._getparser().print_help()
                sys.stdout.write(
                    "\nNOTE: displaying only minimal help due to UsageError.\n\n"
                )
            raise
        return self

    def notify_exception(
        self,
        excinfo: ExceptionInfo[BaseException],
        option: Optional[argparse.Namespace] = None,
    ) -> None:
        if option and getattr(option, "fulltrace", False):
            style: _TracebackStyle = "long"
        else:
            style = "native"
        excrepr = excinfo.getrepr(
            funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
        )
        res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
        if not any(res):
            for line in str(excrepr).split("\n"):
                sys.stderr.write("INTERNALERROR> %s\n" % line)
                sys.stderr.flush()

    def cwd_relative_nodeid(self, nodeid: str) -> str:
        # nodeid's are relative to the rootpath, compute relative to cwd.
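        # For example (hypothetical paths): rootpath=/proj, invoked from
        # /proj/sub, nodeid "tests/test_x.py::test_a" becomes
        # "../tests/test_x.py::test_a".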
        if self.invocation_params.dir != self.rootpath:
            fullpath = self.rootpath / nodeid
            nodeid = bestrelpath(self.invocation_params.dir, fullpath)
        return nodeid

    @classmethod
    def fromdictargs(cls, option_dict, args) -> "Config":
        """Constructor usable for subprocesses."""
        config = get_config(args)
        config.option.__dict__.update(option_dict)
        config.parse(args, addopts=False)
        for x in config.option.plugins:
            config.pluginmanager.consider_pluginarg(x)
        return config

    def _processopt(self, opt: "Argument") -> None:
        for name in opt._short_opts + opt._long_opts:
            self._opt2dest[name] = opt.dest

        if hasattr(opt, "default"):
            if not hasattr(self.option, opt.dest):
                setattr(self.option, opt.dest, opt.default)

    @hookimpl(trylast=True)
    def pytest_load_initial_conftests(self, early_config: "Config") -> None:
        self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)

    def _initini(self, args: Sequence[str]) -> None:
        ns, unknown_args = self._parser.parse_known_and_unknown_args(
            args, namespace=copy.copy(self.option)
        )
        rootpath, inipath, inicfg = determine_setup(
            ns.inifilename,
            ns.file_or_dir + unknown_args,
            rootdir_cmd_arg=ns.rootdir or None,
            config=self,
        )
        self._rootpath = rootpath
        self._inipath = inipath
        self.inicfg = inicfg
        self._parser.extra_info["rootdir"] = str(self.rootpath)
        self._parser.extra_info["inifile"] = str(self.inipath)
        self._parser.addini("addopts", "extra command line options", "args")
        self._parser.addini("minversion", "minimally required pytest version")
        self._parser.addini(
            "required_plugins",
            "plugins that must be present for pytest to run",
            type="args",
            default=[],
        )
        self._override_ini = ns.override_ini or ()

    def _consider_importhook(self, args: Sequence[str]) -> None:
        """Install the PEP 302 import hook if using assertion rewriting.

        Needs to parse the --assert=<mode> option from the commandline
        and find all the installed plugins to mark them for rewriting
        by the importhook.
        """
        ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
        mode = getattr(ns, "assertmode", "plain")
        if mode == "rewrite":
            import _pytest.assertion

            try:
                hook = _pytest.assertion.install_importhook(self)
            except SystemError:
                mode = "plain"
            else:
                self._mark_plugins_for_rewrite(hook)
        self._warn_about_missing_assertion(mode)

    def _mark_plugins_for_rewrite(self, hook) -> None:
        """Given an importhook, mark for rewrite any top-level
        modules or packages in the distribution package for
        all pytest plugins."""
        self.pluginmanager.rewrite_hook = hook

        if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
            # We don't autoload from setuptools entry points, no need to continue.
            return

        package_files = (
            str(file)
            for dist in importlib_metadata.distributions()
            if any(ep.group == "pytest11" for ep in dist.entry_points)
            for file in dist.files or []
        )

        for name in _iter_rewritable_modules(package_files):
            hook.mark_rewrite(name)

    def _validate_args(self, args: List[str], via: str) -> List[str]:
        """Validate known args."""
        self._parser._config_source_hint = via  # type: ignore
        try:
            self._parser.parse_known_and_unknown_args(
                args, namespace=copy.copy(self.option)
            )
        finally:
            del self._parser._config_source_hint  # type: ignore

        return args

    def _preparse(self, args: List[str], addopts: bool = True) -> None:
        if addopts:
            env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
            if len(env_addopts):
                args[:] = (
                    self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS")
                    + args
                )
        self._initini(args)
        if addopts:
            args[:] = (
                self._validate_args(self.getini("addopts"), "via addopts config") + args
            )

        self.known_args_namespace = self._parser.parse_known_args(
            args, namespace=copy.copy(self.option)
        )
        self._checkversion()
        self._consider_importhook(args)
        self.pluginmanager.consider_preparse(args, exclude_only=False)
        if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
            # Don't autoload from setuptools entry point. Only explicitly specified
            # plugins are going to be loaded.
            self.pluginmanager.load_setuptools_entrypoints("pytest11")
        self.pluginmanager.consider_env()

        self.known_args_namespace = self._parser.parse_known_args(
            args, namespace=copy.copy(self.known_args_namespace)
        )

        self._validate_plugins()
        self._warn_about_skipped_plugins()

        if self.known_args_namespace.strict:
            self.issue_config_time_warning(
                _pytest.deprecated.STRICT_OPTION, stacklevel=2
            )

        if self.known_args_namespace.confcutdir is None and self.inipath is not None:
            confcutdir = str(self.inipath.parent)
            self.known_args_namespace.confcutdir = confcutdir
        try:
            self.hook.pytest_load_initial_conftests(
                early_config=self, args=args, parser=self._parser
            )
        except ConftestImportFailure as e:
            if self.known_args_namespace.help or self.known_args_namespace.version:
                # we don't want to prevent --help/--version from working,
                # so just let it pass and print a warning at the end
                self.issue_config_time_warning(
                    PytestConfigWarning(f"could not load initial conftests: {e.path}"),
                    stacklevel=2,
                )
            else:
                raise

    @hookimpl(hookwrapper=True)
    def pytest_collection(self) -> Generator[None, None, None]:
        """Validate invalid ini keys after collection is done so we take into
        account options added by late-loading conftest files."""
        yield
        self._validate_config_options()

    def _checkversion(self) -> None:
        import pytest

        minver = self.inicfg.get("minversion", None)
        if minver:
            # Imported lazily to improve start-up time.
            from packaging.version import Version

            if not isinstance(minver, str):
                raise pytest.UsageError(
                    "%s: 'minversion' must be a single value" % self.inipath
                )

            if Version(minver) > Version(pytest.__version__):
                raise pytest.UsageError(
                    "%s: 'minversion' requires pytest-%s, actual pytest-%s"
                    % (self.inipath, minver, pytest.__version__,)
                )

    def _validate_config_options(self) -> None:
        for key in sorted(self._get_unknown_ini_keys()):
            self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")

    def _validate_plugins(self) -> None:
        required_plugins = sorted(self.getini("required_plugins"))
        if not required_plugins:
            return

        # Imported lazily to improve start-up time.
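        # (Illustrative, hypothetical ini value.) With
        #     required_plugins = pytest-xdist>=2.1.0
        # a missing or too-old pytest-xdist makes the checks below raise
        # UsageError("Missing required plugins: pytest-xdist>=2.1.0").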
        from packaging.version import Version
        from packaging.requirements import InvalidRequirement, Requirement

        plugin_info = self.pluginmanager.list_plugin_distinfo()
        plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info}

        missing_plugins = []
        for required_plugin in required_plugins:
            try:
                spec = Requirement(required_plugin)
            except InvalidRequirement:
                missing_plugins.append(required_plugin)
                continue

            if spec.name not in plugin_dist_info:
                missing_plugins.append(required_plugin)
            elif Version(plugin_dist_info[spec.name]) not in spec.specifier:
                missing_plugins.append(required_plugin)

        if missing_plugins:
            raise UsageError(
                "Missing required plugins: {}".format(", ".join(missing_plugins)),
            )

    def _warn_or_fail_if_strict(self, message: str) -> None:
        if self.known_args_namespace.strict_config:
            raise UsageError(message)

        self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)

    def _get_unknown_ini_keys(self) -> List[str]:
        parser_inicfg = self._parser._inidict
        return [name for name in self.inicfg if name not in parser_inicfg]

    def parse(self, args: List[str], addopts: bool = True) -> None:
        # Parse given cmdline arguments into this config object.
        assert not hasattr(
            self, "args"
        ), "can only parse cmdline args at most once per Config object"
        self.hook.pytest_addhooks.call_historic(
            kwargs=dict(pluginmanager=self.pluginmanager)
        )
        self._preparse(args, addopts=addopts)
        # XXX deprecated hook:
        self.hook.pytest_cmdline_preparse(config=self, args=args)
        self._parser.after_preparse = True  # type: ignore
        try:
            args = self._parser.parse_setoption(
                args, self.option, namespace=self.option
            )
            if not args:
                if self.invocation_params.dir == self.rootpath:
                    args = self.getini("testpaths")
                if not args:
                    args = [str(self.invocation_params.dir)]
            self.args = args
        except PrintHelp:
            pass

    def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
        """Issue and handle a warning during the "configure" stage.

        During ``pytest_configure`` we can't capture warnings using the
        ``catch_warnings_for_item`` function because it is not possible to have
        hookwrappers around ``pytest_configure``.

        This function is mainly intended for plugins that need to issue warnings
        during ``pytest_configure`` (or similar stages).

        :param warning: The warning instance.
        :param stacklevel: stacklevel forwarded to warnings.warn.
        """
        if self.pluginmanager.is_blocked("warnings"):
            return

        cmdline_filters = self.known_args_namespace.pythonwarnings or []
        config_filters = self.getini("filterwarnings")

        with warnings.catch_warnings(record=True) as records:
            warnings.simplefilter("always", type(warning))
            apply_warning_filters(config_filters, cmdline_filters)
            warnings.warn(warning, stacklevel=stacklevel)

        if records:
            frame = sys._getframe(stacklevel - 1)
            location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
            self.hook.pytest_warning_captured.call_historic(
                kwargs=dict(
                    warning_message=records[0],
                    when="config",
                    item=None,
                    location=location,
                )
            )
            self.hook.pytest_warning_recorded.call_historic(
                kwargs=dict(
                    warning_message=records[0],
                    when="config",
                    nodeid="",
                    location=location,
                )
            )

    def addinivalue_line(self, name: str, line: str) -> None:
        """Add a line to an ini-file option. The option must have been
        declared but might not yet be set, in which case the line becomes
        the first line in its value."""
        x = self.getini(name)
        assert isinstance(x, list)
        x.append(line)  # modifies the cached list inline

    def getini(self, name: str):
        """Return configuration value from an :ref:`ini file <configfiles>`.
        If the specified name hasn't been registered through a prior
        :py:func:`parser.addini <_pytest.config.argparsing.Parser.addini>`
        call (usually from a plugin), a ValueError is raised.
        """
        try:
            return self._inicache[name]
        except KeyError:
            self._inicache[name] = val = self._getini(name)
            return val

    def _getini(self, name: str):
        try:
            description, type, default = self._parser._inidict[name]
        except KeyError as e:
            raise ValueError(f"unknown configuration value: {name!r}") from e
        override_value = self._get_override_ini_value(name)
        if override_value is None:
            try:
                value = self.inicfg[name]
            except KeyError:
                if default is not None:
                    return default
                if type is None:
                    return ""
                return []
        else:
            value = override_value
        # Coerce the values based on types.
        #
        # Note: some coercions are only required if we are reading from .ini files, because
        # the file format doesn't contain type information, but when reading from toml we will
        # get either str or list of str values (see _parse_ini_config_from_pyproject_toml).
        # For example:
        #
        #   ini:
        #     a_line_list = "tests acceptance"
        #   in this case, we need to split the string to obtain a list of strings.
        #
        #   toml:
        #     a_line_list = ["tests", "acceptance"]
        #   in this case, we already have a list ready to use.
        #
        if type == "pathlist":
            # TODO: This assert is probably not valid in all cases.
            assert self.inipath is not None
            dp = self.inipath.parent
            input_values = shlex.split(value) if isinstance(value, str) else value
            return [py.path.local(str(dp / x)) for x in input_values]
        elif type == "args":
            return shlex.split(value) if isinstance(value, str) else value
        elif type == "linelist":
            if isinstance(value, str):
                return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
            else:
                return value
        elif type == "bool":
            return _strtobool(str(value).strip())
        else:
            assert type in [None, "string"]
            return value

    def _getconftest_pathlist(
        self, name: str, path: py.path.local
    ) -> Optional[List[py.path.local]]:
        try:
            mod, relroots = self.pluginmanager._rget_with_confmod(
                name, path, self.getoption("importmode")
            )
        except KeyError:
            return None
        modpath = py.path.local(mod.__file__).dirpath()
        values: List[py.path.local] = []
        for relroot in relroots:
            if not isinstance(relroot, py.path.local):
                relroot = relroot.replace("/", os.sep)
                relroot = modpath.join(relroot, abs=True)
            values.append(relroot)
        return values

    def _get_override_ini_value(self, name: str) -> Optional[str]:
        value = None
        # override_ini is a list of "ini=value" options.
        # Always use the last item if multiple values are set for same ini-name,
        # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2.
        for ini_config in self._override_ini:
            try:
                key, user_ini_value = ini_config.split("=", 1)
            except ValueError as e:
                raise UsageError(
                    "-o/--override-ini expects option=value style (got: {!r}).".format(
                        ini_config
                    )
                ) from e
            else:
                if key == name:
                    value = user_ini_value
        return value

    def getoption(self, name: str, default=notset, skip: bool = False):
        """Return command line option value.

        :param name: Name of the option.  You may also specify
            the literal ``--OPT`` option instead of the "dest" option name.
        :param default: Default value if no option of that name exists.
        :param skip: If True, raise pytest.skip if option does not exist
            or has a None value.
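
        For example (hypothetical option names)::

            verbose = config.getoption("verbose")
            verbose = config.getoption("--verbose")  # literal flag spelling

        Both spellings resolve to the same destination attribute.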
""" name = self._opt2dest.get(name, name) try: val = getattr(self.option, name) if val is None and skip: raise AttributeError(name) return val except AttributeError as e: if default is not notset: return default if skip: import pytest pytest.skip(f"no {name!r} option found") raise ValueError(f"no option named {name!r}") from e def getvalue(self, name: str, path=None): """Deprecated, use getoption() instead.""" return self.getoption(name) def getvalueorskip(self, name: str, path=None): """Deprecated, use getoption(skip=True) instead.""" return self.getoption(name, skip=True) def _warn_about_missing_assertion(self, mode: str) -> None: if not _assertion_supported(): if mode == "plain": warning_text = ( "ASSERTIONS ARE NOT EXECUTED" " and FAILING TESTS WILL PASS. Are you" " using python -O?" ) else: warning_text = ( "assertions not in test modules or" " plugins will be ignored" " because assert statements are not executed " "by the underlying Python interpreter " "(are you using python -O?)\n" ) self.issue_config_time_warning( PytestConfigWarning(warning_text), stacklevel=3, ) def _warn_about_skipped_plugins(self) -> None: for module_name, msg in self.pluginmanager.skipped_plugins: self.issue_config_time_warning( PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), stacklevel=2, ) def _assertion_supported() -> bool: try: assert False except AssertionError: return True else: return False # type: ignore[unreachable] def create_terminal_writer( config: Config, file: Optional[TextIO] = None ) -> TerminalWriter: """Create a TerminalWriter instance configured according to the options in the config object. Every code which requires a TerminalWriter object and has access to a config object should use this function. """ tw = TerminalWriter(file=file) if config.option.color == "yes": tw.hasmarkup = True elif config.option.color == "no": tw.hasmarkup = False if config.option.code_highlight == "yes": tw.code_highlight = True elif config.option.code_highlight == "no": tw.code_highlight = False return tw def _strtobool(val: str) -> bool: """Convert a string representation of truth to True or False. True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. .. note:: Copied from distutils.util. """ val = val.lower() if val in ("y", "yes", "t", "true", "on", "1"): return True elif val in ("n", "no", "f", "false", "off", "0"): return False else: raise ValueError(f"invalid truth value {val!r}") @lru_cache(maxsize=50) def parse_warning_filter( arg: str, *, escape: bool ) -> Tuple[str, str, Type[Warning], str, int]: """Parse a warnings filter string. This is copied from warnings._setoption, but does not apply the filter, only parses it, and makes the escaping optional. 
""" parts = arg.split(":") if len(parts) > 5: raise warnings._OptionError(f"too many fields (max 5): {arg!r}") while len(parts) < 5: parts.append("") action_, message, category_, module, lineno_ = [s.strip() for s in parts] action: str = warnings._getaction(action_) # type: ignore[attr-defined] category: Type[Warning] = warnings._getcategory(category_) # type: ignore[attr-defined] if message and escape: message = re.escape(message) if module and escape: module = re.escape(module) + r"\Z" if lineno_: try: lineno = int(lineno_) if lineno < 0: raise ValueError except (ValueError, OverflowError) as e: raise warnings._OptionError(f"invalid lineno {lineno_!r}") from e else: lineno = 0 return action, message, category, module, lineno def apply_warning_filters( config_filters: Iterable[str], cmdline_filters: Iterable[str] ) -> None: """Applies pytest-configured filters to the warnings module""" # Filters should have this precedence: cmdline options, config. # Filters should be applied in the inverse order of precedence. for arg in config_filters: warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) for arg in cmdline_filters: warnings.filterwarnings(*parse_warning_filter(arg, escape=True))
pexip/os-pytest
src/_pytest/config/__init__.py
Python
mit
56940
[ "VisIt" ]
472bd726a9cd6117cd6b3a80ba4a43bde5a21c05a96d78404b164c2a0f42314d
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************

# A collection of optimization algorithms.  Version 0.5
# CHANGES
#  Added fminbound (July 2001)
#  Added brute (Aug. 2002)
#  Finished line search satisfying strong Wolfe conditions (Mar. 2004)
#  Updated strong Wolfe conditions line search to use
#  cubic-interpolation (Mar. 2004)

from __future__ import division, print_function, absolute_import


# Minimization routines

__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
           'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
           'line_search', 'check_grad', 'OptimizeResult', 'show_options',
           'OptimizeWarning']

__docformat__ = "restructuredtext en"

import warnings
import sys
import numpy
from scipy._lib.six import callable
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
                   vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
                         line_search_wolfe2 as line_search,
                         LineSearchWarning)
from inspect import getargspec


# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
                   'maxfev': 'Maximum number of function evaluations has '
                             'been exceeded.',
                   'maxiter': 'Maximum number of iterations has been '
                              'exceeded.',
                   'pr_loss': 'Desired error not necessarily achieved due '
                              'to precision loss.'}


class MemoizeJac(object):
    """ Decorator that caches the value and gradient of a function each
    time it is called. """
    def __init__(self, fun):
        self.fun = fun
        self.jac = None
        self.x = None

    def __call__(self, x, *args):
        self.x = numpy.asarray(x).copy()
        fg = self.fun(x, *args)
        self.jac = fg[1]
        return fg[0]

    def derivative(self, x, *args):
        if self.jac is not None and numpy.alltrue(x == self.x):
            return self.jac
        else:
            self(x, *args)
            return self.jac


class OptimizeResult(dict):
    """ Represents the optimization result.

    Attributes
    ----------
    x : ndarray
        The solution of the optimization.
    success : bool
        Whether or not the optimizer exited successfully.
    status : int
        Termination status of the optimizer. Its value depends on the
        underlying solver. Refer to `message` for details.
    message : str
        Description of the cause of the termination.
    fun, jac, hess, hess_inv : ndarray
        Values of objective function, Jacobian, Hessian or its inverse (if
        available). The Hessians may be approximations, see the documentation
        of the function in question.
    nfev, njev, nhev : int
        Number of evaluations of the objective function and of its
        Jacobian and Hessian.
    nit : int
        Number of iterations performed by the optimizer.
    maxcv : float
        The maximum constraint violation.

    Notes
    -----
    There may be additional attributes not listed above depending on the
    specific solver. Since this class is essentially a subclass of dict
    with attribute accessors, one can see which attributes are available
    using the `keys()` method.
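
    Examples
    --------
    A hand-built instance, just to illustrate the dict/attribute duality
    (real instances are produced by the solvers):

    >>> res = OptimizeResult(x=[1.0], fun=0.5, success=True)
    >>> res.fun == res["fun"]
    True
    >>> sorted(res.keys())
    ['fun', 'success', 'x']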
""" def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __repr__(self): if self.keys(): m = max(map(len, list(self.keys()))) + 1 return '\n'.join([k.rjust(m) + ': ' + repr(v) for k, v in self.items()]) else: return self.__class__.__name__ + "()" class OptimizeWarning(UserWarning): pass def _check_unknown_options(unknown_options): if unknown_options: msg = ", ".join(map(str, unknown_options.keys())) # Stack level 4: this is called from _minimize_*, which is # called from another function in Scipy. Level 4 is the first # level in user code. warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4) def is_array_scalar(x): """Test whether `x` is either a scalar or an array scalar. """ return np.size(x) == 1 _epsilon = sqrt(numpy.finfo(float).eps) def vecnorm(x, ord=2): if ord == Inf: return numpy.amax(numpy.abs(x)) elif ord == -Inf: return numpy.amin(numpy.abs(x)) else: return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord) def rosen(x): """ The Rosenbrock function. The function computed is:: sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0 Parameters ---------- x : array_like 1-D array of points at which the Rosenbrock function is to be computed. Returns ------- f : float The value of the Rosenbrock function. See Also -------- rosen_der, rosen_hess, rosen_hess_prod """ x = asarray(x) r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0) return r def rosen_der(x): """ The derivative (i.e. gradient) of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the derivative is to be computed. Returns ------- rosen_der : (N,) ndarray The gradient of the Rosenbrock function at `x`. See Also -------- rosen, rosen_hess, rosen_hess_prod """ x = asarray(x) xm = x[1:-1] xm_m1 = x[:-2] xm_p1 = x[2:] der = numpy.zeros_like(x) der[1:-1] = (200 * (xm - xm_m1**2) - 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) der[-1] = 200 * (x[-1] - x[-2]**2) return der def rosen_hess(x): """ The Hessian matrix of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. Returns ------- rosen_hess : ndarray The Hessian matrix of the Rosenbrock function at `x`. See Also -------- rosen, rosen_der, rosen_hess_prod """ x = atleast_1d(x) H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1) diagonal = numpy.zeros(len(x), dtype=x.dtype) diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 diagonal[-1] = 200 diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] H = H + numpy.diag(diagonal) return H def rosen_hess_prod(x, p): """ Product of the Hessian matrix of the Rosenbrock function with a vector. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. p : array_like 1-D array, the vector to be multiplied by the Hessian matrix. Returns ------- rosen_hess_prod : ndarray The Hessian matrix of the Rosenbrock function at `x` multiplied by the vector `p`. 
    See Also
    --------
    rosen, rosen_der, rosen_hess

    """
    x = atleast_1d(x)
    Hp = numpy.zeros(len(x), dtype=x.dtype)
    Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
    Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
                (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
                400 * x[1:-1] * p[2:])
    Hp[-1] = -400 * x[-2] * p[-2] + 200 * p[-1]
    return Hp


def wrap_function(function, args):
    ncalls = [0]
    if function is None:
        return ncalls, None

    def function_wrapper(*wrapper_args):
        ncalls[0] += 1
        return function(*(wrapper_args + args))

    return ncalls, function_wrapper


def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using the downhill simplex algorithm.

    This algorithm only uses function values, not derivatives or second
    derivatives.

    Parameters
    ----------
    func : callable func(x,*args)
        The objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func, i.e. ``f(x,*args)``.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.
    xtol : float, optional
        Relative error in xopt acceptable for convergence.
    ftol : number, optional
        Relative error in func(xopt) acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : number, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        Set to True if fopt and warnflag outputs are desired.
    disp : bool, optional
        Set to True to print convergence messages.
    retall : bool, optional
        Set to True to return list of solutions at each iteration.

    Returns
    -------
    xopt : ndarray
        Parameter that minimizes function.
    fopt : float
        Value of function at minimum: ``fopt = func(xopt)``.
    iter : int
        Number of iterations performed.
    funcalls : int
        Number of function calls made.
    warnflag : int
        1 : Maximum number of function evaluations made.
        2 : Maximum number of iterations reached.
    allvecs : list
        Solution at each iteration.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Nelder-Mead' `method` in particular.

    Notes
    -----
    Uses a Nelder-Mead simplex algorithm to find the minimum of function of
    one or more variables.

    This algorithm has a long history of successful use in applications.
    But it will usually be slower than an algorithm that uses first or
    second derivative information. In practice it can have poor
    performance in high-dimensional problems and is not robust to
    minimizing complicated functions. Additionally, there currently is no
    complete theory describing when the algorithm will successfully
    converge to the minimum, or how fast it will if it does.

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313

    .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
           Respectable", in Numerical Analysis 1995, Proceedings of the
           1995 Dundee Biennial Conference in Numerical Analysis, D.F.
           Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
           Harlow, UK, pp. 191-208.
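
    Examples
    --------
    A minimal sketch with default tolerances (convergence printout
    suppressed via ``disp``); the quadratic below is an arbitrary
    illustration:

    >>> from scipy.optimize import fmin
    >>> xopt = fmin(lambda x: (x[0] - 3.0)**2, [0.0], disp=False)
    >>> abs(xopt[0] - 3.0) < 1e-3
    True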
""" opts = {'xtol': xtol, 'ftol': ftol, 'maxiter': maxiter, 'maxfev': maxfun, 'disp': disp, 'return_all': retall} res = _minimize_neldermead(func, x0, args, callback=callback, **opts) if full_output: retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_neldermead(func, x0, args=(), callback=None, xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the Nelder-Mead algorithm. Options ------- disp : bool Set to True to print convergence messages. xtol : float Relative error in solution `xopt` acceptable for convergence. ftol : float Relative error in ``fun(xopt)`` acceptable for convergence. maxiter : int Maximum number of iterations to perform. maxfev : int Maximum number of function evaluations to make. """ _check_unknown_options(unknown_options) maxfun = maxfev retall = return_all fcalls, func = wrap_function(func, args) x0 = asfarray(x0).flatten() N = len(x0) if maxiter is None: maxiter = N * 200 if maxfun is None: maxfun = N * 200 rho = 1 chi = 2 psi = 0.5 sigma = 0.5 one2np1 = list(range(1, N + 1)) sim = numpy.zeros((N + 1, N), dtype=x0.dtype) fsim = numpy.zeros((N + 1,), float) sim[0] = x0 if retall: allvecs = [sim[0]] fsim[0] = func(x0) nonzdelt = 0.05 zdelt = 0.00025 for k in range(0, N): y = numpy.array(x0, copy=True) if y[k] != 0: y[k] = (1 + nonzdelt)*y[k] else: y[k] = zdelt sim[k + 1] = y f = func(y) fsim[k + 1] = f ind = numpy.argsort(fsim) fsim = numpy.take(fsim, ind, 0) # sort so sim[0,:] has the lowest function value sim = numpy.take(sim, ind, 0) iterations = 1 while (fcalls[0] < maxfun and iterations < maxiter): if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol): break xbar = numpy.add.reduce(sim[:-1], 0) / N xr = (1 + rho) * xbar - rho * sim[-1] fxr = func(xr) doshrink = 0 if fxr < fsim[0]: xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] fxe = func(xe) if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] fxc = func(xc) if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink = 1 else: # Perform an inside contraction xcc = (1 - psi) * xbar + psi * sim[-1] fxcc = func(xcc) if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma * (sim[j] - sim[0]) fsim[j] = func(sim[j]) ind = numpy.argsort(fsim) sim = numpy.take(sim, ind, 0) fsim = numpy.take(fsim, ind, 0) if callback is not None: callback(sim[0]) iterations += 1 if retall: allvecs.append(sim[0]) x = sim[0] fval = numpy.min(fsim) warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 msg = _status_message['maxfev'] if disp: print('Warning: ' + msg) elif iterations >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: print('Warning: ' + msg) else: msg = _status_message['success'] if disp: print(msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % iterations) print(" Function evaluations: %d" % fcalls[0]) result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], status=warnflag, success=(warnflag == 0), message=msg, x=x) if retall: result['allvecs'] = allvecs return result def 


def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
    """
    See ``approx_fprime``.  An optional initial function value arg is added.

    """
    if f0 is None:
        f0 = f(*((xk,) + args))
    grad = numpy.zeros((len(xk),), float)
    ei = numpy.zeros((len(xk),), float)
    for k in range(len(xk)):
        ei[k] = 1.0
        d = epsilon * ei
        grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
        ei[k] = 0.0
    return grad


def approx_fprime(xk, f, epsilon, *args):
    """Finite-difference approximation of the gradient of a scalar function.

    Parameters
    ----------
    xk : array_like
        The coordinate vector at which to determine the gradient of `f`.
    f : callable
        The function of which to determine the gradient (partial derivatives).
        Should take `xk` as first argument, other arguments to `f` can be
        supplied in ``*args``.  Should return a scalar, the value of the
        function at `xk`.
    epsilon : array_like
        Increment to `xk` to use for determining the function gradient.
        If a scalar, uses the same finite difference delta for all partial
        derivatives.  If an array, should contain one value per element of
        `xk`.
    \*args : args, optional
        Any other arguments that are to be passed to `f`.

    Returns
    -------
    grad : ndarray
        The partial derivatives of `f` with respect to `xk`.

    See Also
    --------
    check_grad : Check correctness of gradient function against approx_fprime.

    Notes
    -----
    The function gradient is determined by the forward finite difference
    formula::

                 f(xk[i] + epsilon[i]) - f(xk[i])
        f'[i] = ---------------------------------
                            epsilon[i]

    The main use of `approx_fprime` is in scalar function optimizers like
    `fmin_bfgs`, to determine numerically the Jacobian of a function.

    Examples
    --------
    >>> from scipy import optimize
    >>> def func(x, c0, c1):
    ...     "Coordinate vector `x` should be an array of size two."
    ...     return c0 * x[0]**2 + c1*x[1]**2

    >>> x = np.ones(2)
    >>> c0, c1 = (1, 200)
    >>> eps = np.sqrt(np.finfo(float).eps)
    >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
    array([   2.        ,  400.00004198])

    """
    return _approx_fprime_helper(xk, f, epsilon, args=args)


def check_grad(func, grad, x0, *args, **kwargs):
    """Check the correctness of a gradient function by comparing it against a
    (forward) finite-difference approximation of the gradient.

    Parameters
    ----------
    func : callable ``func(x0, *args)``
        Function whose derivative is to be checked.
    grad : callable ``grad(x0, *args)``
        Gradient of `func`.
    x0 : ndarray
        Points to check `grad` against forward difference approximation of
        grad using `func`.
    args : \*args, optional
        Extra arguments passed to `func` and `grad`.
    epsilon : float, optional
        Step size used for the finite difference approximation. It defaults to
        ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.

    Returns
    -------
    err : float
        The square root of the sum of squares (i.e. the 2-norm) of the
        difference between ``grad(x0, *args)`` and the finite difference
        approximation of `grad` using func at the points `x0`.

    See Also
    --------
    approx_fprime

    Examples
    --------
    >>> def func(x):
    ...     return x[0]**2 - 0.5 * x[1]**3
    >>> def grad(x):
    ...     return [2 * x[0], -1.5 * x[1]**2]
    >>> from scipy.optimize import check_grad
    >>> check_grad(func, grad, [1.5, -1.5])
    2.9802322387695312e-08

    """
    step = kwargs.pop('epsilon', _epsilon)
    if kwargs:
        raise ValueError("Unknown keyword arguments: %r" %
                         (list(kwargs.keys()),))
    return sqrt(sum((grad(x0, *args) -
                     approx_fprime(x0, func, step, *args))**2))


def approx_fhess_p(x0, p, fprime, epsilon, *args):
    f2 = fprime(*((x0 + epsilon*p,) + args))
    f1 = fprime(*((x0,) + args))
    return (f2 - f1) / epsilon


class _LineSearchError(RuntimeError):
    pass


def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found

    """
    ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
                             old_fval, old_old_fval,
                             **kwargs)

    if ret[0] is None:
        # line search failed: try different one.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', LineSearchWarning)
            ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval)

    if ret[0] is None:
        raise _LineSearchError()

    return ret


def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0, callback=None):
    """
    Minimize a function using the BFGS algorithm.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable f'(x,*args), optional
        Gradient of f.
    args : tuple, optional
        Extra arguments passed to f and fprime.
    gtol : float, optional
        Gradient norm must be less than gtol before successful termination.
    norm : float, optional
        Order of norm (Inf is max, -Inf is min)
    epsilon : int or ndarray, optional
        If fprime is approximated, use this value for the step size.
    callback : callable, optional
        An optional user-supplied function to call after each
        iteration.  Called as callback(xk), where xk is the
        current parameter vector.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return fopt, func_calls, grad_calls, and warnflag
        in addition to xopt.
    disp : bool, optional
        Print convergence message if True.
    retall : bool, optional
        Return a list of results at each iteration if True.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. f(xopt) == fopt.
    fopt : float
        Minimum value.
    gopt : ndarray
        Value of gradient at minimum, f'(xopt), which should be near 0.
    Bopt : ndarray
        Value of 1/f''(xopt), i.e. the inverse hessian matrix.
    func_calls : int
        Number of function_calls made.
    grad_calls : int
        Number of gradient calls made.
    warnflag : integer
        1 : Maximum number of iterations exceeded.
        2 : Gradient and/or function calls not changing.
    allvecs : list
        `OptimizeResult` at each iteration.  Only returned if retall is True.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'BFGS' `method` in particular.

    Notes
    -----
    Optimize the function, f, whose gradient is given by fprime
    using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
    and Shanno (BFGS)

    References
    ----------
    Wright and Nocedal, 'Numerical Optimization', 1999, pg. 198.
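
    Examples
    --------
    An illustrative sketch using the Rosenbrock test function defined in
    this module (default tolerances, convergence printout disabled):

    >>> import numpy as np
    >>> from scipy.optimize import fmin_bfgs, rosen, rosen_der
    >>> xopt = fmin_bfgs(rosen, np.array([1.3, 0.7]), fprime=rosen_der,
    ...                  disp=False)
    >>> np.allclose(xopt, 1.0, atol=1e-4)
    True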
""" opts = {'gtol': gtol, 'norm': norm, 'eps': epsilon, 'disp': disp, 'maxiter': maxiter, 'return_all': retall} res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], res['nfev'], res['njev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the BFGS algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter : int Maximum number of iterations to perform. gtol : float Gradient norm must be less than `gtol` before successful termination. norm : float Order of norm (Inf is max, -Inf is min). eps : float or ndarray If `jac` is approximated, use this value for the step size. """ _check_unknown_options(unknown_options) f = fun fprime = jac epsilon = eps retall = return_all x0 = asarray(x0).flatten() if x0.ndim == 0: x0.shape = (1,) if maxiter is None: maxiter = len(x0) * 200 func_calls, f = wrap_function(f, args) if fprime is None: grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) else: grad_calls, myfprime = wrap_function(fprime, args) gfk = myfprime(x0) k = 0 N = len(x0) I = numpy.eye(N, dtype=int) Hk = I old_fval = f(x0) old_old_fval = None xk = x0 if retall: allvecs = [x0] sk = [2 * gtol] warnflag = 0 gnorm = vecnorm(gfk, ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -numpy.dot(Hk, gfk) try: alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, old_old_fval) except _LineSearchError: # Line search failed to find a better solution. warnflag = 2 break xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) sk = xkp1 - xk xk = xkp1 if gfkp1 is None: gfkp1 = myfprime(xkp1) yk = gfkp1 - gfk gfk = gfkp1 if callback is not None: callback(xk) k += 1 gnorm = vecnorm(gfk, ord=norm) if (gnorm <= gtol): break if not numpy.isfinite(old_fval): # We correctly found +-Inf as optimal value, or something went # wrong. 
            warnflag = 2
            break

        try:  # this was handled in numeric, let it remain for more safety
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is a patch for numpy
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
                                                 sk[numpy.newaxis, :])

    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result


def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using a nonlinear conjugate gradient algorithm.

    Parameters
    ----------
    f : callable, ``f(x, *args)``
        Objective function to be minimized.  Here `x` must be a 1-D array of
        the variables that are to be changed in the search for a minimum, and
        `args` are the other (fixed) parameters of `f`.
    x0 : ndarray
        A user-supplied initial estimate of `xopt`, the optimal value of `x`.
        It must be a 1-D array of values.
    fprime : callable, ``fprime(x, *args)``, optional
        A function that returns the gradient of `f` at `x`.  Here `x` and
        `args` are as described above for `f`.  The returned value must be a
        1-D array.  Defaults to None, in which case the gradient is
        approximated numerically (see `epsilon`, below).
    args : tuple, optional
        Parameter values passed to `f` and `fprime`.  Must be supplied
        whenever additional fixed parameters are needed to completely specify
        the functions `f` and `fprime`.
    gtol : float, optional
        Stop when the norm of the gradient is less than `gtol`.
    norm : float, optional
        Order to use for the norm of the gradient
        (``-np.Inf`` is min, ``np.Inf`` is max).
    epsilon : float or ndarray, optional
        Step size(s) to use when `fprime` is approximated numerically. Can
        be a scalar or a 1-D array.  Defaults to ``sqrt(eps)``, with eps the
        floating point machine precision.  Usually ``sqrt(eps)`` is about
        1.5e-8.
    maxiter : int, optional
        Maximum number of iterations to perform. Default is ``200 * len(x0)``.
    full_output : bool, optional
        If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
        addition to `xopt`.  See the Returns section below for additional
        information on optional return values.
    disp : bool, optional
        If True, return a convergence message, followed by `xopt`.
    retall : bool, optional
        If True, add to the returned values the results of each iteration.
    callback : callable, optional
        An optional user-supplied function, called after each iteration.
        Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float, optional
        Minimum value found, f(xopt).  Only returned if `full_output` is True.
    func_calls : int, optional
        The number of function_calls made.  Only returned if `full_output`
        is True.
    grad_calls : int, optional
        The number of gradient calls made. Only returned if `full_output` is
        True.
    warnflag : int, optional
        Integer value with warning status, only returned if `full_output` is
        True.

        0 : Success.

        1 : The maximum number of iterations was exceeded.

        2 : Gradient and/or function calls were not changing.  May indicate
            that precision was lost, i.e., the routine did not converge.

    allvecs : list of ndarray, optional
        List of arrays, containing the results at each iteration.
        Only returned if `retall` is True.

    See Also
    --------
    minimize : common interface to all `scipy.optimize` algorithms for
               unconstrained and constrained minimization of multivariate
               functions.  It provides an alternative way to call
               ``fmin_cg``, by specifying ``method='CG'``.

    Notes
    -----
    This conjugate gradient algorithm is based on that of Polak and Ribiere
    [1]_.

    Conjugate gradient methods tend to work better when:

    1. `f` has a unique global minimizing point, and no local minima or
       other stationary points,
    2. `f` is, at least locally, reasonably well approximated by a
       quadratic function of the variables,
    3. `f` is continuous and has a continuous gradient,
    4. `fprime` is not too large, e.g., has a norm less than 1000,
    5. The initial guess, `x0`, is reasonably close to `f` 's global
       minimizing point, `xopt`.

    References
    ----------
    .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.

    Examples
    --------
    Example 1: seek the minimum value of the expression
    ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
    of the parameters and an initial guess ``(u, v) = (0, 0)``.

    >>> args = (2, 3, 7, 8, 9, 10)  # parameter values
    >>> def f(x, *args):
    ...     u, v = x
    ...     a, b, c, d, e, f = args
    ...     return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
    >>> def gradf(x, *args):
    ...     u, v = x
    ...     a, b, c, d, e, f = args
    ...     gu = 2*a*u + b*v + d     # u-component of the gradient
    ...     gv = b*u + 2*c*v + e     # v-component of the gradient
    ...     return np.asarray((gu, gv))
    >>> x0 = np.asarray((0, 0))  # Initial guess.
    >>> from scipy import optimize
    >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
    >>> print('res1 = ', res1)
    Optimization terminated successfully.
             Current function value: 1.617021
             Iterations: 2
             Function evaluations: 5
             Gradient evaluations: 5
    res1 =  [-1.80851064 -0.25531915]

    Example 2: solve the same problem using the `minimize` function.
    (This `myopts` dictionary shows all of the available options,
    although in practice only non-default values would be needed.
    The returned value will be a dictionary.)

    >>> opts = {'maxiter' : None,    # default value.
    ...         'disp' : True,    # non-default value.
    ...         'gtol' : 1e-5,    # default value.
    ...         'norm' : np.inf,  # default value.
    ...         'eps' : 1.4901161193847656e-08}  # default value.
    >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
    ...                          method='CG', options=opts)
    Optimization terminated successfully.
             Current function value: 1.617021
             Iterations: 2
             Function evaluations: 5
             Gradient evaluations: 5
    >>> res2.x  # minimum found
    array([-1.80851064, -0.25531915])

    """
    opts = {'gtol': gtol,
            'norm': norm,
            'eps': epsilon,
            'disp': disp,
            'maxiter': maxiter,
            'return_all': retall}

    res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)

    if full_output:
        retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        if retall:
            retlist += (res['allvecs'], )
        return retlist
    else:
        if retall:
            return res['x'], res['allvecs']
        else:
            return res['x']


def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
                 gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                 disp=False, return_all=False,
                 **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    conjugate gradient algorithm.

    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.

    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all

    x0 = asarray(x0).flatten()
    if maxiter is None:
        maxiter = len(x0) * 200
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    xk = x0
    old_fval = f(xk)
    old_old_fval = None

    if retall:
        allvecs = [xk]
    warnflag = 0
    pk = -gfk
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        deltak = numpy.dot(gfk, gfk)

        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
                                     old_old_fval, c2=0.4)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break

        xk = xk + alpha_k * pk
        if retall:
            allvecs.append(xk)
        if gfkp1 is None:
            gfkp1 = myfprime(xk)
        yk = gfkp1 - gfk
        beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        gnorm = vecnorm(gfk, ord=norm)
        if callback is not None:
            callback(xk)
        k += 1

    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result


def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """
    Unconstrained minimization of a function using the Newton-CG method.

    Parameters
    ----------
    f : callable ``f(x, *args)``
        Objective function to be minimized.
x0 : ndarray Initial guess. fprime : callable ``f'(x, *args)`` Gradient of f. fhess_p : callable ``fhess_p(x, p, *args)``, optional Function which computes the Hessian of f times an arbitrary vector, p. fhess : callable ``fhess(x, *args)``, optional Function to compute the Hessian matrix of f. args : tuple, optional Extra arguments passed to f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon : float or ndarray, optional If fhess is approximated, use this value for the step size. callback : callable, optional An optional user-supplied function which is called after each iteration. Called as callback(xk), where xk is the current parameter vector. avextol : float, optional Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter : int, optional Maximum number of iterations to perform. full_output : bool, optional If True, return the optional outputs. disp : bool, optional If True, print convergence message. retall : bool, optional If True, return a list of results at each iteration. Returns ------- xopt : ndarray Parameters which minimize f, i.e. ``f(xopt) == fopt``. fopt : float Value of the function at xopt, i.e. ``fopt = f(xopt)``. fcalls : int Number of function calls made. gcalls : int Number of gradient calls made. hcalls : int Number of Hessian calls made. warnflag : int Warnings generated by the algorithm. 1 : Maximum number of iterations exceeded. allvecs : list The result at each iteration, if retall is True (see below). See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'Newton-CG' `method` in particular. Notes ----- Only one of `fhess_p` or `fhess` needs to be given. If `fhess` is provided, then `fhess_p` will be ignored. If neither `fhess` nor `fhess_p` is provided, then the Hessian product will be approximated using finite differences on `fprime`. `fhess_p` must compute the Hessian times an arbitrary vector. If it is not given, finite-differences on `fprime` are used to compute it. Newton-CG methods are also called truncated Newton methods. This function differs from scipy.optimize.fmin_tnc because 1. scipy.optimize.fmin_ncg is written purely in Python using numpy and scipy while scipy.optimize.fmin_tnc calls a C function. 2. scipy.optimize.fmin_ncg is only for unconstrained minimization while scipy.optimize.fmin_tnc is for unconstrained minimization or box constrained minimization. (Box constraints give lower and upper bounds for each variable separately.) References ---------- Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140. """ opts = {'xtol': avextol, 'eps': epsilon, 'maxiter': maxiter, 'disp': disp, 'return_all': retall} res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['nfev'], res['njev'], res['nhev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the Newton-CG algorithm. Note that the `jac` parameter (Jacobian) is required. Options ------- disp : bool Set to True to print convergence messages. xtol : float Average relative error in solution `xopt` acceptable for convergence.
maxiter : int Maximum number of iterations to perform. eps : float or ndarray If `jac` is approximated, use this value for the step size. """ _check_unknown_options(unknown_options) if jac is None: raise ValueError('Jacobian is required for Newton-CG method') f = fun fprime = jac fhess_p = hessp fhess = hess avextol = xtol epsilon = eps retall = return_all x0 = asarray(x0).flatten() fcalls, f = wrap_function(f, args) gcalls, fprime = wrap_function(fprime, args) hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0) * avextol update = [2 * xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0) old_old_fval = None float64eps = numpy.finfo(numpy.float64).eps warnflag = 0 while (numpy.add.reduce(numpy.abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -fprime(xk) maggrad = numpy.add.reduce(numpy.abs(b)) eta = numpy.min([0.5, numpy.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), dtype=x0.dtype) ri = -b psupi = -ri i = 0 dri0 = numpy.dot(ri, ri) if fhess is not None: # you want to compute hessian once. A = fhess(*(xk,) + args) hcalls = hcalls + 1 while numpy.add.reduce(numpy.abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = approx_fhess_p(xk, psupi, fprime, epsilon) else: Ap = fhess_p(xk, psupi, *args) hcalls = hcalls + 1 else: Ap = numpy.dot(A, psupi) # check curvature Ap = asarray(Ap).squeeze() # get rid of matrices... curv = numpy.dot(psupi, Ap) if 0 <= curv <= 3 * float64eps: break elif curv < 0: if (i > 0): break else: # fall back to steepest descent direction xsupi = dri0 / (-curv) * b break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = numpy.dot(ri, ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update numpy.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. gfk = -b # gradient at xk try: alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval) except _LineSearchError: # Line search failed to find a better solution. warnflag = 2 break update = alphak * pk xk = xk + update # upcast if necessary if callback is not None: callback(xk) if retall: allvecs.append(xk) k += 1 fval = old_fval if warnflag == 2: msg = _status_message['pr_loss'] if disp: print("Warning: " + msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % fcalls[0]) print(" Gradient evaluations: %d" % gcalls[0]) print(" Hessian evaluations: %d" % hcalls) elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] if disp: print("Warning: " + msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % fcalls[0]) print(" Gradient evaluations: %d" % gcalls[0]) print(" Hessian evaluations: %d" % hcalls) else: msg = _status_message['success'] if disp: print(msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % fcalls[0]) print(" Gradient evaluations: %d" % gcalls[0]) print(" Hessian evaluations: %d" % hcalls) result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0], nhev=hcalls, status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, full_output=0, disp=1): """Bounded minimization for scalar functions. 
Parameters ---------- func : callable f(x,*args) Objective function to be minimized (must accept and return scalars). x1, x2 : float or array scalar The optimization bounds. args : tuple, optional Extra arguments passed to function. xtol : float, optional The convergence tolerance. maxfun : int, optional Maximum number of function evaluations allowed. full_output : bool, optional If True, return optional outputs. disp : int, optional If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Returns ------- xopt : ndarray Parameters (over given interval) which minimize the objective function. fval : number The function value at the minimum point. ierr : int An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc : int The number of function calls made. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Bounded' `method` in particular. Notes ----- Finds a local minimizer of the scalar function `func` in the interval x1 < xopt < x2 using Brent's method. (See `brent` for auto-bracketing). """ options = {'xatol': xtol, 'maxiter': maxfun, 'disp': disp} res = _minimize_scalar_bounded(func, (x1, x2), args, **options) if full_output: return res['x'], res['fun'], res['status'], res['nfev'] else: return res['x'] def _minimize_scalar_bounded(func, bounds, args=(), xatol=1e-5, maxiter=500, disp=0, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. xatol : float Absolute error in solution `xopt` acceptable for convergence. """ _check_unknown_options(unknown_options) maxfun = maxiter # Test bounds are of correct form if len(bounds) != 2: raise ValueError('bounds must have two elements.') x1, x2 = bounds if not (is_array_scalar(x1) and is_array_scalar(x2)): raise ValueError("Optimisation bounds must be scalars" " or array scalars.") if x1 > x2: raise ValueError("The lower bound exceeds the upper bound.") flag = 0 header = ' Func-count x f(x) Procedure' step = ' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5 * (3.0 - sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean * (b - a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x, *args) num = 1 fmin_data = (1, xf, fx) ffulc = fnfc = fx xm = 0.5 * (a + b) tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if disp > 2: print(" ") print(header) print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))): golden = 1 # Check for parabolic fit if numpy.abs(e) > tol1: golden = 0 r = (xf - nfc) * (fx - ffulc) q = (xf - fulc) * (fx - fnfc) p = (xf - fulc) * q - (xf - nfc) * r q = 2.0 * (q - r) if q > 0.0: p = -p q = numpy.abs(q) r = e e = rat # Check for acceptability of parabola if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and (p < q * (b - xf))): rat = (p + 0.0) / q x = xf + rat step = ' parabolic' if ((x - a) < tol2) or ((b - x) < tol2): si = numpy.sign(xm - xf) + ((xm - xf) == 0) rat = tol1 * si else: # do a golden section step golden = 1 if golden: # Do a golden-section step if xf >= xm: e = a - xf else: e = b - xf rat = golden_mean*e step = ' golden' si = numpy.sign(rat) + (rat == 0) x = xf + si * numpy.max([numpy.abs(rat), tol1]) fu = func(x, *args) num += 1 fmin_data = (num, x, fu) if disp > 2: print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) if fu <= fx: if x >= xf: a = 
xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5 * (a + b) tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if num >= maxfun: flag = 1 break fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xatol, disp) result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), message={0: 'Solution found.', 1: 'Maximum number of function calls ' 'reached.'}.get(flag, ''), x=xf, nfev=num) return result class Brent: #need to rethink design of __init__ def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, full_output=0): self.func = func self.args = args self.tol = tol self.maxiter = maxiter self._mintol = 1.0e-11 self._cg = 0.3819660 self.xmin = None self.fval = None self.iter = 0 self.funcalls = 0 # need to rethink design of set_bracket (new options, etc) def set_bracket(self, brack=None): self.brack = brack def get_bracket_info(self): #set up func = self.func args = self.args brack = self.brack ### BEGIN core bracket_info code ### ### carefully DOCUMENT any CHANGES in core ## if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError("Not a bracketing interval.") fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError("Not a bracketing interval.") funcalls = 3 else: raise ValueError("Bracketing interval must be " "length 2 or 3 sequence.") ### END core bracket_info code ### return xa, xb, xc, fa, fb, fc, funcalls def optimize(self): # set up for optimization func = self.func xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() _mintol = self._mintol _cg = self._cg ################################# #BEGIN CORE ALGORITHM ################################# x = w = v = xb fw = fv = fx = func(*((x,) + self.args)) if (xa < xc): a = xa b = xc else: a = xc b = xa deltax = 0.0 funcalls = 1 iter = 0 while (iter < self.maxiter): tol1 = self.tol * numpy.abs(x) + _mintol tol2 = 2.0 * tol1 xmid = 0.5 * (a + b) # check for convergence if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)): break # XXX In the first iteration, rat is only bound in the true case # of this conditional. This used to cause an UnboundLocalError # (gh-4140). It should be set before the if (but to what?). if (numpy.abs(deltax) <= tol1): if (x >= xmid): deltax = a - x # do a golden section step else: deltax = b - x rat = _cg * deltax else: # do a parabolic step tmp1 = (x - w) * (fx - fv) tmp2 = (x - v) * (fx - fw) p = (x - v) * tmp2 - (x - w) * tmp1 tmp2 = 2.0 * (tmp2 - tmp1) if (tmp2 > 0.0): p = -p tmp2 = numpy.abs(tmp2) dx_temp = deltax deltax = rat # check parabolic fit if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))): rat = p * 1.0 / tmp2 # if parabolic step is useful. 
u = x + rat if ((u - a) < tol2 or (b - u) < tol2): if xmid - x >= 0: rat = tol1 else: rat = -tol1 else: if (x >= xmid): deltax = a - x # if it's not, do a golden section step else: deltax = b - x rat = _cg * deltax if (numpy.abs(rat) < tol1): # update by at least tol1 if rat >= 0: u = x + tol1 else: u = x - tol1 else: u = x + rat fu = func(*((u,) + self.args)) # calculate new output value funcalls += 1 if (fu > fx): # if it's bigger than current if (u < x): a = u else: b = u if (fu <= fw) or (w == x): v = w w = u fv = fw fw = fu elif (fu <= fv) or (v == x) or (v == w): v = u fv = fu else: if (u >= x): a = x else: b = x v = w w = x x = u fv = fw fw = fx fx = fu iter += 1 ################################# #END CORE ALGORITHM ################################# self.xmin = x self.fval = fx self.iter = iter self.funcalls = funcalls def get_result(self, full_output=False): if full_output: return self.xmin, self.fval, self.iter, self.funcalls else: return self.xmin def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): """ Given a function of one variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of tol. Parameters ---------- func : callable f(x,*args) Objective function. args : tuple, optional Additional arguments (if present). brack : tuple, optional Triple (a,b,c) where (a<b<c) and func(b) < func(a),func(c). If bracket consists of two numbers (a,c) then they are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that the obtained solution will satisfy a<=x<=c. tol : float, optional Stop if the change between iterations is less than `tol`. full_output : bool, optional If True, return all output args (xmin, fval, iter, funcalls). maxiter : int, optional Maximum number of iterations in solution. Returns ------- xmin : ndarray Optimum point. fval : float Optimum value. iter : int Number of iterations. funcalls : int Number of objective function evaluations made. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Brent' `method` in particular. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. """ options = {'xtol': tol, 'maxiter': maxiter} res = _minimize_scalar_brent(func, brack, args, **options) if full_output: return res['x'], res['fun'], res['nit'], res['nfev'] else: return res['x'] def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, maxiter=500, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. xtol : float Relative error in solution `xopt` acceptable for convergence. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. """ _check_unknown_options(unknown_options) tol = xtol if tol < 0: raise ValueError('tolerance should be >= 0, got %r' % tol) brent = Brent(func=func, args=args, tol=tol, full_output=True, maxiter=maxiter) brent.set_bracket(brack) brent.optimize() x, fval, nit, nfev = brent.get_result(full_output=True) return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, success=nit < maxiter) def golden(func, args=(), brack=None, tol=_epsilon, full_output=0): """ Return the minimum of a function of one variable. Given a function of one variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of tol.
Parameters ---------- func : callable func(x,*args) Objective function to minimize. args : tuple, optional Additional arguments (if present), passed to func. brack : tuple, optional Triple (a,b,c), where (a<b<c) and func(b) < func(a),func(c). If bracket consists of two numbers (a, c), then they are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that the obtained solution will satisfy a<=x<=c. tol : float, optional x tolerance stop criterion. full_output : bool, optional If True, return optional outputs. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Golden' `method` in particular. Notes ----- Uses an analog of the bisection method to decrease the bracketed interval. """ options = {'xtol': tol} res = _minimize_scalar_golden(func, brack, args, **options) if full_output: return res['x'], res['fun'], res['nfev'] else: return res['x'] def _minimize_scalar_golden(func, brack=None, args=(), xtol=_epsilon, **unknown_options): """ Options ------- xtol : float Relative error in solution `xopt` acceptable for convergence. """ _check_unknown_options(unknown_options) tol = xtol if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError("Not a bracketing interval.") fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError("Not a bracketing interval.") funcalls = 3 else: raise ValueError("Bracketing interval must be length 2 or 3 sequence.") _gR = 0.61803399 _gC = 1.0 - _gR x3 = xc x0 = xa if (numpy.abs(xc - xb) > numpy.abs(xb - xa)): x1 = xb x2 = xb + _gC * (xc - xb) else: x2 = xb x1 = xb - _gC * (xb - xa) f1 = func(*((x1,) + args)) f2 = func(*((x2,) + args)) funcalls += 2 while (numpy.abs(x3 - x0) > tol * (numpy.abs(x1) + numpy.abs(x2))): if (f2 < f1): x0 = x1 x1 = x2 x2 = _gR * x1 + _gC * x3 f1 = f2 f2 = func(*((x2,) + args)) else: x3 = x2 x2 = x1 x1 = _gR * x2 + _gC * x0 f2 = f1 f1 = func(*((x1,) + args)) funcalls += 1 if (f1 < f2): xmin = x1 fval = f1 else: xmin = x2 fval = f2 return OptimizeResult(fun=fval, nfev=funcalls, x=xmin) def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): """ Bracket the minimum of the function. Given a function and distinct initial points, search in the downhill direction (as defined by the initial points) and return new points xa, xb, xc that bracket the minimum of the function f(xa) > f(xb) < f(xc). It doesn't always mean that the obtained solution will satisfy xa<=x<=xb. Parameters ---------- func : callable f(x,*args) Objective function to minimize. xa, xb : float, optional Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0. args : tuple, optional Additional arguments (if present), passed to `func`. grow_limit : float, optional Maximum grow limit. Defaults to 110.0 maxiter : int, optional Maximum number of iterations to perform. Defaults to 1000. Returns ------- xa, xb, xc : float Bracket. fa, fb, fc : float Objective function values in bracket. funcalls : int Number of function evaluations made.
""" _gold = 1.618034 _verysmall_num = 1e-21 fa = func(*(xa,) + args) fb = func(*(xb,) + args) if (fa < fb): # Switch so fa > fb xa, xb = xb, xa fa, fb = fb, fa xc = xb + _gold * (xb - xa) fc = func(*((xc,) + args)) funcalls = 3 iter = 0 while (fc < fb): tmp1 = (xb - xa) * (fb - fc) tmp2 = (xb - xc) * (fb - fa) val = tmp2 - tmp1 if numpy.abs(val) < _verysmall_num: denom = 2.0 * _verysmall_num else: denom = 2.0 * val w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom wlim = xb + grow_limit * (xc - xb) if iter > maxiter: raise RuntimeError("Too many iterations.") iter += 1 if (w - xc) * (xb - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xa = xb xb = w fa = fb fb = fw return xa, xb, xc, fa, fb, fc, funcalls elif (fw > fb): xc = w fc = fw return xa, xb, xc, fa, fb, fc, funcalls w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(wlim - xc) >= 0.0: w = wlim fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(xc - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xb = xc xc = w w = xc + _gold * (xc - xb) fb = fc fc = fw fw = func(*((w,) + args)) funcalls += 1 else: w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 xa = xb xb = xc xc = w fa = fb fb = fc fc = fw return xa, xb, xc, fa, fb, fc, funcalls def _linesearch_powell(func, p, xi, tol=1e-3): """Line-search algorithm using fminbound. Find the minimium of the function ``func(x0+ alpha*direc)``. """ def myfunc(alpha): return func(p + alpha*xi) alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) xi = alpha_min*xi return squeeze(fret), p + xi, xi def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None): """ Minimize a function using modified Powell's method. This method only uses function values, not derivatives. Parameters ---------- func : callable f(x,*args) Objective function to be minimized. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to func. callback : callable, optional An optional user-supplied function, called after each iteration. Called as ``callback(xk)``, where ``xk`` is the current parameter vector. direc : ndarray, optional Initial direction set. xtol : float, optional Line-search error tolerance. ftol : float, optional Relative error in ``func(xopt)`` acceptable for convergence. maxiter : int, optional Maximum number of iterations to perform. maxfun : int, optional Maximum number of function evaluations to make. full_output : bool, optional If True, fopt, xi, direc, iter, funcalls, and warnflag are returned. disp : bool, optional If True, print convergence messages. retall : bool, optional If True, return a list of the solution at each iteration. Returns ------- xopt : ndarray Parameter which minimizes `func`. fopt : number Value of function at minimum: ``fopt = func(xopt)``. direc : ndarray Current direction set. iter : int Number of iterations. funcalls : int Number of function calls made. warnflag : int Integer warning flag: 1 : Maximum number of function evaluations. 2 : Maximum number of iterations. allvecs : list List of solutions at each iteration. See also -------- minimize: Interface to unconstrained minimization algorithms for multivariate functions. See the 'Powell' `method` in particular. Notes ----- Uses a modification of Powell's method to find the minimum of a function of N variables. Powell's method is a conjugate direction method. The algorithm has two loops. 
The outer loop merely iterates over the inner loop. The inner loop minimizes over each current direction in the direction set. At the end of the inner loop, if certain conditions are met, the direction that gave the largest decrease is dropped and replaced with the difference between the current estimated x and the estimated x from the beginning of the inner-loop. The technical conditions for replacing the direction of greatest decrease amount to checking that 1. No further gain can be made along the direction of greatest decrease from that iteration. 2. The direction of greatest decrease accounted for a sufficiently large fraction of the decrease in the function value from that iteration of the inner loop. References ---------- Powell M.J.D. (1964) An efficient method for finding the minimum of a function of several variables without calculating derivatives, Computer Journal, 7 (2):155-162. Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: Numerical Recipes (any edition), Cambridge University Press. """ opts = {'xtol': xtol, 'ftol': ftol, 'maxiter': maxiter, 'maxfev': maxfun, 'disp': disp, 'direc': direc, 'return_all': retall} res = _minimize_powell(func, x0, args, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['direc'], res['nit'], res['nfev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_powell(func, x0, args=(), callback=None, xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, disp=False, direc=None, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the modified Powell algorithm. Options ------- disp : bool Set to True to print convergence messages. xtol : float Relative error in solution `xopt` acceptable for convergence. ftol : float Relative error in ``fun(xopt)`` acceptable for convergence. maxiter : int Maximum number of iterations to perform. maxfev : int Maximum number of function evaluations to make. direc : ndarray Initial set of direction vectors for the Powell method.
""" _check_unknown_options(unknown_options) maxfun = maxfev retall = return_all # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) x = asarray(x0).flatten() if retall: allvecs = [x] N = len(x) if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 if direc is None: direc = eye(N, dtype=float) else: direc = asarray(direc, dtype=float) fval = squeeze(func(x)) x1 = x.copy() iter = 0 ilist = list(range(N)) while True: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol * 100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if callback is not None: callback(x) if retall: allvecs.append(x) bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20 if 2.0 * (fx - fval) <= bnd: break if fcalls[0] >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx + fx2 - 2.0*fval) temp = (fx - fval - delta) t *= temp*temp temp = fx - fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 msg = _status_message['maxfev'] if disp: print("Warning: " + msg) elif iter >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: print("Warning: " + msg) else: msg = _status_message['success'] if disp: print(msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % iter) print(" Function evaluations: %d" % fcalls[0]) x = squeeze(x) result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], status=warnflag, success=(warnflag == 0), message=msg, x=x) if retall: result['allvecs'] = allvecs return result def _endprint(x, flag, fval, maxfun, xtol, disp): if flag == 0: if disp > 1: print("\nOptimization terminated successfully;\n" "The returned value satisfies the termination criteria\n" "(using xtol = ", xtol, ")") if flag == 1: if disp: print("\nMaximum number of function evaluations exceeded --- " "increase maxfun argument.\n") return def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, disp=False): """Minimize a function over a given range by brute force. Uses the "brute force" method, i.e. computes the function's value at each point of a multidimensional grid of points, to find the global minimum of the function. The function is evaluated everywhere in the range with the datatype of the first call to the function, as enforced by the ``vectorize`` NumPy function. The value and type of the function evaluation returned when ``full_output=True`` are affected in addition by the ``finish`` argument (see Notes). Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. ranges : tuple Each component of the `ranges` tuple must be either a "slice object" or a range tuple of the form ``(low, high)``. The program uses these to create the grid of points on which the objective function will be computed. See `Note 2` for more detail. args : tuple, optional Any additional fixed parameters needed to completely specify the function. Ns : int, optional Number of grid points along the axes, if not otherwise specified. 
See `Note 2`. full_output : bool, optional If True, return the evaluation grid and the objective function's values on it. finish : callable, optional An optimization function that is called with the result of brute force minimization as initial guess. `finish` should take `func` and the initial guess as positional arguments, and take `args` as keyword arguments. It may additionally take `full_output` and/or `disp` as keyword arguments. Use None if no "polishing" function is to be used. See Notes for more details. disp : bool, optional Set to True to print convergence messages. Returns ------- x0 : ndarray A 1-D array containing the coordinates of a point at which the objective function had its minimum value. (See `Note 1` for which point is returned.) fval : float Function value at the point `x0`. (Returned when `full_output` is True.) grid : tuple Representation of the evaluation grid. It has the same length as `x0`. (Returned when `full_output` is True.) Jout : ndarray Function values at each point of the evaluation grid, `i.e.`, ``Jout = func(*grid)``. (Returned when `full_output` is True.) See Also -------- basinhopping, differential_evolution Notes ----- *Note 1*: The program finds the gridpoint at which the lowest value of the objective function occurs. If `finish` is None, that is the point returned. When the global minimum occurs within (or not very far outside) the grid's boundaries, and the grid is fine enough, that point will be in the neighborhood of the global minimum. However, users often employ some other optimization program to "polish" the gridpoint values, `i.e.`, to seek a more precise (local) minimum near `brute's` best gridpoint. The `brute` function's `finish` option provides a convenient way to do that. Any polishing program used must take `brute's` output as its initial guess as a positional argument, and take `brute's` input values for `args` as keyword arguments, otherwise an error will be raised. It may additionally take `full_output` and/or `disp` as keyword arguments. `brute` assumes that the `finish` function returns either an `OptimizeResult` object or a tuple in the form: ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing value of the argument, ``Jmin`` is the minimum value of the objective function, "..." may be some other returned values (which are not used by `brute`), and ``statuscode`` is the status code of the `finish` program. Note that when `finish` is not None, the values returned are those of the `finish` program, *not* the gridpoint ones. Consequently, while `brute` confines its search to the input grid points, the `finish` program's results usually will not coincide with any gridpoint, and may fall outside the grid's boundary. *Note 2*: The grid of points is a `numpy.mgrid` object. For `brute` the `ranges` and `Ns` inputs have the following effect. Each component of the `ranges` tuple can be either a slice object or a two-tuple giving a range of values, such as (0, 5). If the component is a slice object, `brute` uses it directly. If the component is a two-tuple range, `brute` internally converts it to a slice object that interpolates `Ns` points from its low-value to its high-value, inclusive. Examples -------- We illustrate the use of `brute` to seek the global minimum of a function of two variables that is given as the sum of a positive-definite quadratic and two deep "Gaussian-shaped" craters. Specifically, define the objective function `f` as the sum of three other functions, ``f = f1 + f2 + f3``.
We suppose each of these has a signature ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions are as defined below. >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) >>> def f1(z, *params): ... x, y = z ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) >>> def f2(z, *params): ... x, y = z ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) >>> def f3(z, *params): ... x, y = z ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) >>> def f(z, *params): ... return f1(z, *params) + f2(z, *params) + f3(z, *params) Thus, the objective function may have local minima near the minimum of each of the three functions of which it is composed. To use `fmin` to polish its gridpoint result, we may then continue as follows: >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) >>> from scipy import optimize >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True, ... finish=optimize.fmin) >>> resbrute[0] # global minimum array([-1.05665192, 1.80834843]) >>> resbrute[1] # function value at global minimum -3.4085818767 Note that if `finish` had been set to None, we would have gotten the gridpoint [-1.0 1.75] where the rounded function value is -2.892. """ N = len(ranges) if N > 40: raise ValueError("Brute Force not possible with more " "than 40 variables.") lrange = list(ranges) for k in range(N): if type(lrange[k]) is not type(slice(None)): if len(lrange[k]) < 3: lrange[k] = tuple(lrange[k]) + (complex(Ns),) lrange[k] = slice(*lrange[k]) if (N == 1): lrange = lrange[0] def _scalarfunc(*params): params = squeeze(asarray(params)) return func(params, *args) vecfunc = vectorize(_scalarfunc) grid = mgrid[lrange] if (N == 1): grid = (grid,) Jout = vecfunc(*grid) Nshape = shape(Jout) indx = argmin(Jout.ravel(), axis=-1) Nindx = zeros(N, int) xmin = zeros(N, float) for k in range(N - 1, -1, -1): thisN = Nshape[k] Nindx[k] = indx % Nshape[k] indx = indx // thisN for k in range(N): xmin[k] = grid[k][tuple(Nindx)] Jmin = Jout[tuple(Nindx)] if (N == 1): grid = grid[0] xmin = xmin[0] if callable(finish): # set up kwargs for `finish` function finish_args = getargspec(finish).args finish_kwargs = dict() if 'full_output' in finish_args: finish_kwargs['full_output'] = 1 if 'disp' in finish_args: finish_kwargs['disp'] = disp elif 'options' in finish_args: # pass 'disp' as `options` # (e.g. if `finish` is `minimize`) finish_kwargs['options'] = {'disp': disp} # run minimizer res = finish(func, xmin, args=args, **finish_kwargs) if isinstance(res, OptimizeResult): xmin = res.x Jmin = res.fun success = res.success else: xmin = res[0] Jmin = res[1] success = res[-1] == 0 if not success: if disp: print("Warning: Either final optimization did not succeed " "or `finish` does not return `statuscode` as its last " "argument.") if full_output: return xmin, Jmin, grid, Jout else: return xmin def show_options(solver=None, method=None, disp=True): """ Show documentation for additional options of optimization solvers. These are method-specific options that can be supplied through the ``options`` dict. Parameters ---------- solver : str Type of optimization solver. One of 'minimize', 'minimize_scalar', 'root', or 'linprog'. method : str, optional If not given, shows all methods of the specified solver. Otherwise, show only the options for the specified method. 
Valid values correspond to methods' names of the respective solver (e.g. 'BFGS' for 'minimize'). disp : bool, optional Whether to print the result rather than returning it. Returns ------- text Either None (for disp=True) or the text string (disp=False) Notes ----- The solver-specific methods are: `scipy.optimize.minimize` - :ref:`Nelder-Mead <optimize.minimize-neldermead>` - :ref:`Powell <optimize.minimize-powell>` - :ref:`CG <optimize.minimize-cg>` - :ref:`BFGS <optimize.minimize-bfgs>` - :ref:`Newton-CG <optimize.minimize-newtoncg>` - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` - :ref:`TNC <optimize.minimize-tnc>` - :ref:`COBYLA <optimize.minimize-cobyla>` - :ref:`SLSQP <optimize.minimize-slsqp>` - :ref:`dogleg <optimize.minimize-dogleg>` - :ref:`trust-ncg <optimize.minimize-trustncg>` `scipy.optimize.root` - :ref:`hybr <optimize.root-hybr>` - :ref:`lm <optimize.root-lm>` - :ref:`broyden1 <optimize.root-broyden1>` - :ref:`broyden2 <optimize.root-broyden2>` - :ref:`anderson <optimize.root-anderson>` - :ref:`linearmixing <optimize.root-linearmixing>` - :ref:`diagbroyden <optimize.root-diagbroyden>` - :ref:`excitingmixing <optimize.root-excitingmixing>` - :ref:`krylov <optimize.root-krylov>` - :ref:`df-sane <optimize.root-dfsane>` `scipy.optimize.minimize_scalar` - :ref:`brent <optimize.minimize_scalar-brent>` - :ref:`golden <optimize.minimize_scalar-golden>` - :ref:`bounded <optimize.minimize_scalar-bounded>` `scipy.optimize.linprog` - :ref:`simplex <optimize.linprog-simplex>` """ import textwrap doc_routines = { 'minimize': ( ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'), ('cg', 'scipy.optimize.optimize._minimize_cg'), ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'), ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'), ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'), ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'), ('newtoncg', 'scipy.optimize.optimize._minimize_newtoncg'), ('powell', 'scipy.optimize.optimize._minimize_powell'), ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'), ('tnc', 'scipy.optimize.tnc._minimize_tnc'), ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'), ), 'root': ( ('hybr', 'scipy.optimize.minpack._root_hybr'), ('lm', 'scipy.optimize._root._root_leastsq'), ('broyden1', 'scipy.optimize._root._root_broyden1_doc'), ('broyden2', 'scipy.optimize._root._root_broyden2_doc'), ('anderson', 'scipy.optimize._root._root_anderson_doc'), ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'), ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'), ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'), ('krylov', 'scipy.optimize._root._root_krylov_doc'), ('df-sane', 'scipy.optimize._spectral._root_df_sane'), ), 'linprog': ( ('simplex', 'scipy.optimize._linprog._linprog_simplex'), ), 'minimize_scalar': ( ('brent', 'scipy.optimize.optimize._minimize_scalar_brent'), ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'), ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'), ), } if solver is None: text = ["\n\n\n========\n", "minimize\n", "========\n"] text.append(show_options('minimize', disp=False)) text.extend(["\n\n===============\n", "minimize_scalar\n", "===============\n"]) text.append(show_options('minimize_scalar', disp=False)) text.extend(["\n\n\n====\n", "root\n", "====\n"]) text.append(show_options('root', disp=False)) text.extend(['\n\n\n=======\n', 'linprog\n', '=======\n']) text.append(show_options('linprog', disp=False)) text = "".join(text) else: solver =
solver.lower() if solver not in doc_routines: raise ValueError('Unknown solver %r' % (solver,)) if method is None: text = [] for name, _ in doc_routines[solver]: text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"]) text.append(show_options(solver, name, disp=False)) text = "".join(text) else: methods = dict(doc_routines[solver]) if method not in methods: raise ValueError("Unknown method %r" % (method,)) name = methods[method] # Import function object parts = name.split('.') mod_name = ".".join(parts[:-1]) __import__(mod_name) obj = getattr(sys.modules[mod_name], parts[-1]) # Get doc doc = obj.__doc__ if doc is not None: text = textwrap.dedent(doc).strip() else: text = "" if disp: print(text) return else: return text def main(): import time times = [] algor = [] x0 = [0.8, 1.2, 0.7] print("Nelder-Mead Simplex") print("===================") start = time.time() x = fmin(rosen, x0) print(x) times.append(time.time() - start) algor.append('Nelder-Mead Simplex\t') print() print("Powell Direction Set Method") print("===========================") start = time.time() x = fmin_powell(rosen, x0) print(x) times.append(time.time() - start) algor.append('Powell Direction Set Method.') print() print("Nonlinear CG") print("============") start = time.time() x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) print(x) times.append(time.time() - start) algor.append('Nonlinear CG \t') print() print("BFGS Quasi-Newton") print("=================") start = time.time() x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) print(x) times.append(time.time() - start) algor.append('BFGS Quasi-Newton\t') print() print("BFGS approximate gradient") print("=========================") start = time.time() x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) print(x) times.append(time.time() - start) algor.append('BFGS without gradient\t') print() print("Newton-CG with Hessian product") print("==============================") start = time.time() x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80) print(x) times.append(time.time() - start) algor.append('Newton-CG with hessian product') print() print("Newton-CG with full Hessian") print("===========================") start = time.time() x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80) print(x) times.append(time.time() - start) algor.append('Newton-CG with full hessian') print() print("\nMinimizing the Rosenbrock function of order 3\n") print(" Algorithm \t\t\t Seconds") print("===========\t\t\t =========") for k in range(len(algor)): print(algor[k], "\t -- ", times[k]) if __name__ == "__main__": main()
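# -----------------------------------------------------------------------------
# Added illustrative sketch (not part of the upstream module): it shows how
# `bracket` and `brent`, defined above, cooperate on a one-dimensional
# problem -- `bracket` walks downhill from two initial points until it
# encloses a minimum, and `brent` then refines it. Only names defined in
# this module are assumed; standalone, the same two names can be imported
# from scipy.optimize.

def _demo_bracket_brent():
    def f(x):
        return (x - 2.0) ** 2 + 1.0  # smooth test function, minimum at x = 2

    xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=0.0, xb=1.0)
    assert fa > fb < fc  # the defining property of a bracketing triple
    xmin = brent(f, brack=(xa, xb, xc))
    print("bracket (%g, %g, %g) refined to xmin = %.6f" % (xa, xb, xc, xmin))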
ortylp/scipy
scipy/optimize/optimize.py
Python
bsd-3-clause
95,963
[ "Gaussian" ]
de4c502de2e125f1f10fff81dd5bde93d9999d4c94951b3fc7749aa3ebdb7d77
# GromacsWrapper: test_example.py # Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com> # Released under the GNU Public License 3 (or higher, your choice) # See the file COPYING for details. import gromacs import pytest from numpy.testing import assert_equal from ..datafiles import datafile @pytest.fixture( params=['original', 'nofilename', 'written'] ) def SIMPLE_NDX(request, tmpdir): ndx = gromacs.fileformats.ndx.NDX(datafile('simple.ndx')) if request.param == 'written': out = str(tmpdir.join('out.ndx')) ndx.write(out) ndx = gromacs.fileformats.ndx.NDX(out) elif request.param == 'nofilename': ndx = gromacs.fileformats.ndx.NDX() ndx.read(datafile('simple.ndx')) return ndx def test_read(SIMPLE_NDX): ndx = SIMPLE_NDX assert_equal(ndx['Oxygen'], [1, 4, 7]) assert_equal(ndx['Hydrogen'], [2, 3, 5, 6, 8, 9]) def test_get(SIMPLE_NDX): assert_equal(SIMPLE_NDX.get('Oxygen'), [1, 4, 7]) def test_set(SIMPLE_NDX): SIMPLE_NDX['Nitrogen'] = [10, 11, 12] assert_equal(SIMPLE_NDX['Nitrogen'], [10, 11, 12]) def test_size(SIMPLE_NDX): assert len(SIMPLE_NDX) == 2 def test_sizes(SIMPLE_NDX): assert SIMPLE_NDX.sizes == {'Oxygen': 3, 'Hydrogen': 6} def test_groups(SIMPLE_NDX): assert list(SIMPLE_NDX.groups) == ['Oxygen', 'Hydrogen']
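# Added illustrative test (a sketch, not part of the original suite): it
# recombines only operations already exercised above -- construct an empty
# NDX, assign a group, write it out, and read it back -- to show that a
# newly created group survives a round trip through the index file format.
def test_roundtrip_new_group(tmpdir):
    ndx = gromacs.fileformats.ndx.NDX()
    ndx['Nitrogen'] = [10, 11, 12]
    out = str(tmpdir.join('roundtrip.ndx'))
    ndx.write(out)
    assert_equal(gromacs.fileformats.ndx.NDX(out)['Nitrogen'], [10, 11, 12])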
Becksteinlab/GromacsWrapper
tests/fileformats/test_ndx.py
Python
gpl-3.0
1,353
[ "Gromacs" ]
1c5a441ca9ced4d901d12bbbc0229fd6a93af6d1e51f8cf08ae417b4afbea810
######################################################################## # $HeadURL $ # File: RequestValidatorTests.py # Author: Krzysztof.Ciba@NOSPAMgmail.com # Date: 2012/09/25 13:49:20 ######################################################################## """ :mod: RequestValidatorTests ======================= .. module: RequestValidatorTests :synopsis: test cases for RequestValidator .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com test cases for RequestValidator """ __RCSID__ = "$Id $" ## # @file RequestValidatorTests.py # @author Krzysztof.Ciba@NOSPAMgmail.com # @date 2012/09/25 13:49:31 # @brief Definition of RequestValidatorTests class. ## imports import unittest ## from DIRAC from DIRAC.RequestManagementSystem.Client.Request import Request from DIRAC.RequestManagementSystem.Client.Operation import Operation from DIRAC.RequestManagementSystem.Client.File import File ## SUT from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator ######################################################################## class RequestValidatorTests(unittest.TestCase): """ .. class:: RequestValidatorTests """ def setUp( self ): """ test setup """ self.request = Request() self.operation = Operation() self.file = File() def tearDown( self ): """ test tear down """ del self.request del self.operation del self.file def testValidator( self ): """ validator test """ ## create validator validator = RequestValidator() self.assertEqual( isinstance( validator, RequestValidator ), True ) ## RequestName not set ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : 'RequestName not set', 'OK' : False } ) self.request.RequestName = "test_request" # # no operations ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : "Operations not present in request 'test_request'", 'OK': False} ) self.request.addOperation( self.operation ) # # type not set ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : "Operation #0 in request 'test_request' hasn't got Type set", 'OK' : False } ) self.operation.Type = "ReplicateAndRegister" # # files not present ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : "Operation #0 of type 'ReplicateAndRegister' hasn't got files to process.", 'OK' : False } ) self.operation.addFile( self.file ) # # targetSE not set ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : "Operation #0 of type 'ReplicateAndRegister' is missing TargetSE attribute.", 'OK': False } ) self.operation.TargetSE = "CERN-USER" # # missing LFN ret = validator.validate( self.request ) self.assertEqual( ret, { "Message" : "Operation #0 of type 'ReplicateAndRegister' is missing LFN attribute for file.", "OK": False } ) self.file.LFN = "/a/b/c" # # no ownerDN # force no owner DN because it takes the one of the current user self.request.OwnerDN = '' ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : "Request 'test_request' is missing OwnerDN value", 'OK': False} ) self.request.OwnerDN = "foo/bar=baz" # # no owner group # same, force it self.request.OwnerGroup = '' ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : "Request 'test_request' is missing OwnerGroup value", 'OK': False} ) self.request.OwnerGroup = "dirac_user" ## Checksum set, ChecksumType not set self.file.Checksum = "abcdef" ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : 'File in operation #0 is missing Checksum (abcdef) or ChecksumType 
()', 'OK' : False } ) ## ChecksumType set, Checksum not set self.file.Checksum = "" self.file.ChecksumType = "adler32" ret = validator.validate( self.request ) self.assertEqual( ret, { 'Message' : 'File in operation #0 is missing Checksum () or ChecksumType (ADLER32)', 'OK' : False } ) ## both set self.file.Checksum = "abcdef" self.file.ChecksumType = "adler32" ret = validator.validate( self.request ) self.assertEqual( ret, {'OK': True, 'Value': None} ) ## both unset self.file.Checksum = "" self.file.ChecksumType = None ret = validator.validate( self.request ) self.assertEqual( ret, {'OK': True, 'Value': None} ) ## all OK ret = validator.validate( self.request ) self.assertEqual( ret, {'OK': True, 'Value': None} ) ## test suite execution if __name__ == "__main__": gTestLoader = unittest.TestLoader() gSuite = gTestLoader.loadTestsFromTestCase( RequestValidatorTests ) gSuite = unittest.TestSuite( [ gSuite ] ) unittest.TextTestRunner(verbosity=3).run( gSuite )
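## Added illustrative sketch (not part of the original tests): the minimal
## sequence of mutations, taken directly from the assertions above, that
## turns an empty Request into one that validates cleanly.
def buildMinimalValidRequest():
  """ assemble a request satisfying every check exercised by the test above """
  request = Request()
  request.RequestName = "example_request"
  request.OwnerDN = "foo/bar=baz"
  request.OwnerGroup = "dirac_user"
  operation = Operation()
  operation.Type = "ReplicateAndRegister"
  operation.TargetSE = "CERN-USER"
  opFile = File()
  opFile.LFN = "/a/b/c"
  operation.addFile( opFile )
  request.addOperation( operation )
  return RequestValidator().validate( request )  # expected: {'OK': True, 'Value': None}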
andresailer/DIRAC
RequestManagementSystem/private/test/RequestValidatorTests.py
Python
gpl-3.0
5,248
[ "DIRAC" ]
7b73496a224979b6838ce5b7e41b9471c6eeda5e8e9dfe281dedda05dc096afc
#!/usr/bin/env python ############################################################################## # # This PPXF_POPULATION_GAS_EXAMPLE_SDSS routine shows how to study stellar # population with the procedure PPXF, which implements the Penalized Pixel-Fitting # (pPXF) method by Cappellari M., & Emsellem E., 2004, PASP, 116, 138. # # This example shows how to include gas emission lines as templates # instead of masking them using the GOODPIXELS keyword. # # MODIFICATION HISTORY: # V1.0.0: Adapted from PPXF_KINEMATICS_EXAMPLE. # Michele Cappellari, Oxford, 12 October 2011 # V1.1.0: Made a separate routine for the construction of the templates # spectral library. MC, Vicenza, 11 October 2012 # V1.1.1: Includes regul_error definition. MC, Oxford, 15 November 2012 # V2.0.0: Translated from IDL into Python. MC, Oxford, 6 December 2013 # V2.0.1: Fit SDSS rather than SAURON spectrum. MC, Oxford, 11 December 2013 # V2.1.0: Includes gas emission as templates instead of masking the spectrum. # MC, Oxford, 7 January 2014 # V2.1.1: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014 # V2.1.2: Illustrates how to print and plot emission lines. MC, Oxford, 5 August 2014 # V2.1.3: Only includes emission lines falling within the fitted wavelength range. # MC, Oxford, 3 September 2014 # V2.1.4: Explicitly sort template files as glob() output may not be sorted. # Thanks to Marina Trevisan for reporting problems under Linux. # MC, Sydney, 4 February 2015 # V2.1.5: Included origin='upper' in imshow(). Thanks to Richard McDermid # for reporting a different default value with older Matplotlib versions. # MC, Oxford, 17 February 2015 # V2.1.6: Use color= instead of c= to avoid new Matplotlib bug. # MC, Oxford, 25 February 2015 # V2.1.7: Support both Pyfits and Astropy to read FITS files. # MC, Oxford, 22 October 2015 # ############################################################################## from __future__ import print_function from astropy.io import fits from scipy import ndimage import numpy as np import glob import matplotlib.pyplot as plt from time import clock from ppxf import ppxf import ppxf_util as util def setup_spectral_library(velscale, FWHM_gal): # Read the list of filenames from the Single Stellar Population library # by Vazdekis et al. (2010, MNRAS, 404, 1639) http://miles.iac.es/. # # For this example I downloaded from the above website a set of # model spectra with default linear sampling of 0.9A/pix and default # spectral resolution of FWHM=2.51A. I selected a Salpeter IMF # (slope 1.30) and a range of population parameters: # # [M/H] = [-1.71, -1.31, -0.71, -0.40, 0.00, 0.22] # Age = range(1.0, 17.7828, 26, /LOG) # # This leads to a set of 156 model spectra with the file names like # # Mun1.30Zm0.40T03.9811.fits # # IMPORTANT: the selected models form a rectangular grid in [M/H] # and Age: for each Age the spectra sample the same set of [M/H]. # # We assume below that the model spectra have been placed in the # directory "miles_models" under the current directory. # vazdekis = glob.glob('miles_models/Mun1.30*.fits') vazdekis.sort() FWHM_tem = 2.51 # Vazdekis+10 spectra have a resolution FWHM of 2.51A. # Extract the wavelength range and logarithmically rebin one spectrum # to the same velocity scale of the SDSS galaxy spectrum, to determine # the size needed for the array which will contain the template spectra.
# hdu = fits.open(vazdekis[0]) ssp = hdu[0].data h2 = hdu[0].header lamRange_temp = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)]) sspNew, logLam_temp, velscale = util.log_rebin(lamRange_temp, ssp, velscale=velscale) # Create a three dimensional array to store the # two dimensional grid of model spectra # nAges = 26 nMetal = 6 templates = np.empty((sspNew.size,nAges,nMetal)) # Convolve the whole Vazdekis library of spectral templates # with the quadratic difference between the SDSS and the # Vazdekis instrumental resolution. Logarithmically rebin # and store each template as a column in the array TEMPLATES. # Quadratic sigma difference in pixels Vazdekis --> SDSS # The formula below is rigorously valid if the shapes of the # instrumental spectral profiles are well approximated by Gaussians. # FWHM_dif = np.sqrt(FWHM_gal**2 - FWHM_tem**2) sigma = FWHM_dif/2.355/h2['CDELT1'] # Sigma difference in pixels # Here we make sure the spectra are sorted in both [M/H] # and Age along the two axes of the rectangular grid of templates. # A simple alphabetical ordering of Vazdekis's naming convention # does not sort the files by [M/H], so we do it explicitly below # metal = ['m1.71', 'm1.31', 'm0.71', 'm0.40', 'p0.00', 'p0.22'] for k, mh in enumerate(metal): files = [s for s in vazdekis if mh in s] for j, filename in enumerate(files): hdu = fits.open(filename) ssp = hdu[0].data ssp = ndimage.gaussian_filter1d(ssp,sigma) sspNew, logLam2, velscale = util.log_rebin(lamRange_temp, ssp, velscale=velscale) templates[:,j,k] = sspNew # Templates are *not* normalized here return templates, lamRange_temp, logLam_temp #------------------------------------------------------------------------------ def ppxf_population_gas_example_sdss(): # Read SDSS DR8 galaxy spectrum taken from here http://www.sdss3.org/dr8/ # The spectrum is *already* log rebinned by the SDSS DR8 # pipeline and log_rebin should not be used in this case. # file = 'spectra/NGC3522_SDSS_DR8.fits' hdu = fits.open(file) t = hdu[1].data z = float(hdu[1].header["Z"]) # SDSS redshift estimate # Only use the wavelength range in common between galaxy and stellar library. # mask = (t['wavelength'] > 3540) & (t['wavelength'] < 7409) flux = t['flux'][mask] galaxy = flux/np.median(flux) # Normalize spectrum to avoid numerical issues wave = t['wavelength'][mask] # The noise level is chosen to give Chi^2/DOF=1 without regularization (REGUL=0). # A constant error is not a bad approximation in the fitted wavelength # range and reduces the noise in the fit. # noise = galaxy*0 + 0.01528 # Assume constant noise per pixel here # The velocity step was already chosen by the SDSS pipeline # and we convert it below to km/s # c = 299792.458 # speed of light in km/s velscale = c*np.log(wave[1]/wave[0]) FWHM_gal = 2.76 # SDSS has an approximate instrumental resolution FWHM of 2.76A. 
#------------------- Setup templates ----------------------- stars_templates, lamRange_temp, logLam_temp = \ setup_spectral_library(velscale, FWHM_gal) # The stellar templates are reshaped into a 2-dim array with each spectrum # as a column, however we save the original array dimensions, which are # needed to specify the regularization dimensions # reg_dim = stars_templates.shape[1:] stars_templates = stars_templates.reshape(stars_templates.shape[0], -1) # See the pPXF documentation for the keyword REGUL, # for an explanation of the following two lines # stars_templates /= np.median(stars_templates) # Normalizes stellar templates by a scalar regul_err = 0.004 # Desired regularization error # Construct a set of Gaussian emission line templates. # Estimate the wavelength fitted range in the rest frame. # lamRange_gal = np.array([np.min(wave), np.max(wave)])/(1 + z) gas_templates, line_names, line_wave = \ util.emission_lines(logLam_temp, lamRange_gal, FWHM_gal) # Combines the stellar and gaseous templates into a single array # during the PPXF fit they will be assigned a different kinematic # COMPONENT value # templates = np.column_stack([stars_templates, gas_templates]) #----------------------------------------------------------- # The galaxy and the template spectra do not have the same starting wavelength. # For this reason an extra velocity shift DV has to be applied to the template # to fit the galaxy spectrum. We remove this artificial shift by using the # keyword VSYST in the call to PPXF below, so that all velocities are # measured with respect to DV. This assumes the redshift is negligible. # In the case of a high-redshift galaxy one should de-redshift its # wavelength to the rest frame before using the line below as described # in PPXF_KINEMATICS_EXAMPLE_SAURON. # c = 299792.458 dv = c*np.log(lamRange_temp[0]/wave[0]) # km/s vel = c*np.log(1 + z) # Relation between redshift and velocity in pPXF # Here the actual fit starts. The best fit is plotted on the screen. # # IMPORTANT: Ideally one would like not to use any polynomial in the fit # as the continuum shape contains important information on the population. # Unfortunately this is often not feasible, due to small calibration # uncertainties in the spectral shape. To avoid affecting the line strength of # the spectral features, we exclude additive polynomials (DEGREE=-1) and only use # multiplicative ones (MDEGREE=10). This is only recommended for population, not # for kinematic extraction, where additive polynomials are always recommended. # start = [vel, 180., 0, 0] # (km/s), starting guess for [V, sigma, h3, h4] t = clock() # Assign component=0 to the stellar templates and # component=1 to the gas emission lines templates. # One can easily assign different kinematic components to different gas species # (e.g. component=1 for the Balmer series, component=2 for the [OIII] doublet, ...) # Input a negative MOMENTS value to keep fixed the LOSVD of a component.
    #
    nTemps = stars_templates.shape[1]
    nLines = gas_templates.shape[1]
    component = [0]*nTemps + [1]*nLines
    moments = [4, 2]  # fit (V,sig,h3,h4) for the stars and (V,sig) for the gas
    start = [start, start]  # adopt the same starting value for both gas and stars

    pp = ppxf(templates, galaxy, noise, velscale, start,
              plot=False, moments=moments, degree=-1, mdegree=10,
              vsyst=dv, clean=False, regul=1./regul_err,
              reg_dim=reg_dim, component=component)

    # Plot fit results for stars and gas

    plt.clf()
    plt.subplot(211)
    plt.plot(wave, pp.galaxy, 'k')
    plt.plot(wave, pp.bestfit, 'b', linewidth=2)
    plt.xlabel(r"Observed Wavelength ($\AA$)")  # raw string: \A is not a valid escape
    plt.ylabel("Relative Flux")
    plt.ylim([-0.1, 1.3])
    plt.xlim([np.min(wave), np.max(wave)])
    plt.plot(wave, pp.galaxy - pp.bestfit, 'd', ms=4, color='LimeGreen',
             mec='LimeGreen')  # fit residuals
    plt.axhline(y=0, linestyle='--', color='k', linewidth=2)
    stars = pp.matrix[:, :nTemps].dot(pp.weights[:nTemps])
    plt.plot(wave, stars, 'r', linewidth=2)  # overplot stellar templates alone
    gas = pp.matrix[:, -nLines:].dot(pp.weights[-nLines:])
    plt.plot(wave, gas + 0.15, 'b', linewidth=2)  # overplot emission lines alone

    # When the two Delta Chi^2 below are the same, the solution is the smoothest
    # consistent with the observed spectrum.
    #
    print('Desired Delta Chi^2: %.4g' % np.sqrt(2*galaxy.size))
    print('Current Delta Chi^2: %.4g' % ((pp.chi2 - 1)*galaxy.size))
    print('Elapsed time in PPXF: %.2f s' % (clock() - t))

    w = np.array(component) == 1  # Extract weights of gas emissions only
    print('++++++++++++++++++++++++++++++')
    print('Gas V=%.4g and sigma=%.2g km/s' % (pp.sol[1][0], pp.sol[1][1]))  # component=1
    print('Emission lines peak intensity:')
    for name, weight, line in zip(line_names, pp.weights[w], pp.matrix[:, w].T):
        print('%12s: %.3g' % (name, weight*np.max(line)))
    print('------------------------------')

    # Plot stellar population mass distribution

    plt.subplot(212)
    weights = pp.weights[:np.prod(reg_dim)].reshape(reg_dim)/pp.weights.sum()
    plt.imshow(np.rot90(weights), interpolation='nearest', cmap='gist_heat',
               aspect='auto', origin='upper',
               extent=(np.log10(1.0), np.log10(17.7828), -1.9, 0.45))
    plt.colorbar()
    plt.title("Mass Fraction")
    plt.xlabel("log$_{10}$ Age (Gyr)")
    plt.ylabel("[M/H]")
    plt.tight_layout()
    plt.show()

#------------------------------------------------------------------------------

if __name__ == '__main__':
    ppxf_population_gas_example_sdss()
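#------------------------------------------------------------------------------
# Added illustration (not part of the original example): collapse the weights
# grid plotted above into scalar population estimates. The age nodes are
# *assumed* to span the plot extent logarithmically, and the metallicity nodes
# are taken from the `metal` list in setup_spectral_library(); neither value
# is read from the Vazdekis FITS headers here.

def mean_population(weights, n_ages=26, n_metal=6):
    """Weighted mean log10(Age/Gyr) and [M/H] from a (n_ages, n_metal) grid."""
    log_age = np.linspace(np.log10(1.0), np.log10(17.7828), n_ages)  # assumed grid
    mh = np.array([-1.71, -1.31, -0.71, -0.40, 0.00, 0.22])[:n_metal]
    age_grid, mh_grid = np.meshgrid(log_age, mh, indexing='ij')
    wsum = weights.sum()
    return (weights*age_grid).sum()/wsum, (weights*mh_grid).sum()/wsum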
cebarbosa/fossilgroups
ppxf/ppxf_population_gas_example_sdss.py
Python
gpl-3.0
12,643
[ "Galaxy", "Gaussian" ]
285d08d22d5e6c32a6c49967bdf76c09be47f6c23af3ff390b2ad0f66742b815
#!/usr/bin/env python

import sys
import getopt
import matplotlib as mpl
mpl.use('Agg')  # avoid assuming X backend if we run in cron job
import matplotlib.pyplot as plt
from PIL import Image
import datetime
import collections
from pathlib import Path

# conda install PIL
# conda install -c scitools cartopy
# conda install -c Rufone pycountry


def main(argv):
    input_filename = ''
    output_filename = ''
    unique = False
    odir = Path('.')

    # getopt(argv, options_string, long_options)
    # options_string: colon means takes an argument
    # long_options: optional parameter. If specified, must be a list
    #   of strings with the names of the long options,
    #   which also need to be caught. Long options that require
    #   an argument should be followed by an equals sign.
    # returns a list of (option, value) pairs, and also
    #   a list of the program args left after option list is stripped.
    # Each option-and-value pair returned has the option as its first
    #   element, prefixed with a hyphen for short options or two hyphens
    #   for long options
    usage = 'parse-psi-dl.py -i <inputfile> -o <outputfile> [--unique] [-d <outputdir>]'
    try:
        opts, args = getopt.getopt(argv, "hui:o:d:", ["unique", "ifile=", "ofile=", "odir="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    if len(argv) < 1:  # note, we already stripped one out before calling main
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-i", "--ifile"):
            input_filename = arg
        elif opt in ("-o", "--ofile"):
            output_filename = arg
        elif opt in ("-u", "--unique"):
            unique = True
        elif opt in ("-d", "--odir"):
            odir = Path(arg)

    print(f"Input file is {input_filename}")
    print(f"Output file is {output_filename}")
    print(f"unique flag is {unique}")
    print(f"Output dir is {odir}")

    flag_colors = {
        'Mac': '#4b6ba9',
        'Mac-Py39': '#4b6ba9',
        'Mac-Py38': '#4b6ba9',
        'Mac-Py37': '#4b6ba9',
        'Mac-Py36': '#4b6ba9',
        'Mac-Py35': '#4b6ba9',
        'Mac-Py27': '#4b6ba9',
        'Lin': '#1a4162',
        'Lin-Py39': '#1a4162',
        'Lin-Py38': '#1a4162',
        'Lin-Py37': '#1a4162',
        'Lin-Py36': '#1a4162',
        'Lin-Py35': '#1a4162',
        'Lin-Py27': '#1a4162',
        'WSL': '#a7b2c6',
        'WSL-Py39': '#a7b2c6',
        'WSL-Py38': '#a7b2c6',
        'WSL-Py37': '#a7b2c6',
        'WSL-Py36': '#a7b2c6',
        'WSL-Py35': '#a7b2c6',
        'WSL-Py27': '#a7b2c6',
        'Win': '#394458',
        'Win-Py39': '#394458',
        'Win-Py38': '#394458',
        'Win-Py37': '#394458',
        'Win-Py36': '#394458',
        'Win-Py35': '#394458',
        'Win-Py27': '#394458',
    }

    # note: the newlines will be part of the lines read,
    # so either strip them or don't print additional newlines
    # when printing out
    with open(input_filename, 'r') as infile:
        lines = infile.readlines()

    date_list = []
    time_list = []
    ip_list = []
    os_list = []
    country_list = []

    dl_by_pyos_v10 = collections.defaultdict(int)
    dl_by_pyos_v11 = collections.defaultdict(int)
    dl_by_pyos_v12 = collections.defaultdict(int)
    dl_by_pyos_v13 = collections.defaultdict(int)
    dl_by_pyos_v14 = collections.defaultdict(int)
    dl_by_pyos_v15 = collections.defaultdict(int)
    dl_by_pyos_v16 = collections.defaultdict(int)
    dl_by_pyos_v17 = collections.defaultdict(int)

    dl_by_pyos_2017 = collections.defaultdict(int)
    dl_by_pyos_2018 = collections.defaultdict(int)
    dl_by_pyos_2019 = collections.defaultdict(int)
    dl_by_pyos_2020 = collections.defaultdict(int)
    dl_by_pyos_2021 = collections.defaultdict(int)
    dl_by_pyos_2022 = collections.defaultdict(int)
    dl_by_pyos_2023 = collections.defaultdict(int)
    dl_by_pyos_2024 = collections.defaultdict(int)

    for line in lines:
        (date, time, ip, vers, osname,
         py) = line.split()

        if osname == "Windows" and vers == "1.1":
            osname = "WSL"
        elif osname == "WindowsWSL":
            osname = "WSL"
        else:
            osname = osname[:3]

        # if --unique, add to list only if we don't already have that ip
        if (not unique or ip_list.count(ip) == 0):
            if vers.startswith('1.0'):
                dl_by_pyos_v10[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.1'):
                dl_by_pyos_v11[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.2'):
                dl_by_pyos_v12[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.3'):
                dl_by_pyos_v13[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.4'):
                dl_by_pyos_v14[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.5'):
                dl_by_pyos_v15[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.6'):
                dl_by_pyos_v16[f'{osname}-Py{py}'] += 1
            elif vers.startswith('1.7'):
                dl_by_pyos_v17[f'{osname}-Py{py}'] += 1

            if date.startswith('2017'):
                dl_by_pyos_2017[f'{osname}-v{vers}'] += 1
            elif date.startswith('2018'):
                dl_by_pyos_2018[f'{osname}-v{vers}'] += 1
            elif date.startswith('2019'):
                dl_by_pyos_2019[f'{osname}-v{vers}'] += 1
            elif date.startswith('2020'):
                dl_by_pyos_2020[f'{osname}-v{vers}'] += 1
            elif date.startswith('2021'):
                dl_by_pyos_2021[f'{osname}-v{vers}'] += 1
            elif date.startswith('2022'):
                dl_by_pyos_2022[f'{osname}-v{vers}'] += 1
            elif date.startswith('2023'):
                dl_by_pyos_2023[f'{osname}-v{vers}'] += 1
            elif date.startswith('2024'):
                dl_by_pyos_2024[f'{osname}-v{vers}'] += 1

            # Bug fix: remember this ip. Without this append, ip_list stays
            # empty, ip_list.count(ip) is always 0, and --unique is a no-op.
            ip_list.append(ip)

    def figure_from_count_dict(label, dicary):
        if len(dicary) == 0:
            return
        print(f'\n<<< {label} >>>\n')

        # now sort them back so the small wedges are at starting point
        resorted_pyos = []
        resorted_downloads = []
        for c in sorted(dicary.items(), key=lambda k: k[1]):
            resorted_pyos.append(c[0])
            resorted_downloads.append(c[1])
            print(c)

        alt_colors = ['tan', 'purple', 'lightgrey', 'teal', 'goldenrod', '#ce1126', '#63b2be']
        alt_count = 0
        colors = []
        labels = []
        min_label = int(0.018 * sum(resorted_downloads))
        for i, c in enumerate(resorted_pyos):
            if resorted_downloads[i] > min_label:
                labels.append(f"{c.replace('v', '')} ({resorted_downloads[i]})")
            else:
                labels.append('')
            if c in flag_colors:
                colors.append(flag_colors[c])
            elif c[:3] in flag_colors:
                colors.append(flag_colors[c[:3]])
            else:
                if alt_count < len(alt_colors):
                    colors.append(alt_colors[alt_count])
                    alt_count += 1
                else:
                    colors.append('gray')

        mpl.rcParams['font.size'] = 10

        # pie chart of the results
        fig = plt.figure(num=1, figsize=(4, 4))
        ax = fig.add_subplot(111, label=label)
        patches, texts = ax.pie(resorted_downloads, colors=colors, labels=labels,
                                labeldistance=1.05)
        # The below subplots_adjust would work except that we need axis('equal'),
        # which forces some dead space vertically that we don't want, to match the
        # dead space horizontally that we do need
        #plt.subplots_adjust(left=0.25, right=0.75, top=0.9, bottom=0.1)
        for p in patches:
            p.set_edgecolor('white')
        for t in texts:
            t.set_size('smaller')

        now = datetime.datetime.now().strftime("%Y-%m-%d")
        uniq = "Unique " if unique else ""
        if label.startswith('v'):
            startdate = '2017-05-19'
            enddate = now
        elif label.startswith('20'):
            startdate = '2017-05-19' if label == '2017' else f'{label}-01-01'
            enddate = now if now.startswith(label) else f'{label}-12-31'
        outline = f"{uniq}Psi4 {label} installer downloads: {sum(resorted_downloads)}\n{startdate} to {enddate}\n"
        ax.set_title(outline)
        print(outline)
        ax.set_xlabel('Not including conda updates or github clones')
        ax.axis('equal')  # enforce circular shape, no distortion of plot area

        # padding to avoid clipping of labels
        plt.tight_layout(pad=3.2, w_pad=1.0, h_pad=1.0)
        plt.savefig('psitmppyos.png',
format='png', transparent=True) plt.close() # clean up file, it needs cropping top and bottom img = Image.open('psitmppyos.png') x1, y1 = img.size cropped_img = img.crop((0, y1/10, x1, y1-y1/10)) cropped_img.save(odir / f'psi-downloads-pie-pyos-{label}.png') figure_from_count_dict('v1.0', dl_by_pyos_v10) figure_from_count_dict('v1.1', dl_by_pyos_v11) figure_from_count_dict('v1.2', dl_by_pyos_v12) figure_from_count_dict('v1.3', dl_by_pyos_v13) figure_from_count_dict('v1.4', dl_by_pyos_v14) figure_from_count_dict('v1.5', dl_by_pyos_v15) figure_from_count_dict('v1.6', dl_by_pyos_v16) figure_from_count_dict('v1.7', dl_by_pyos_v17) figure_from_count_dict('2017', dl_by_pyos_2017) figure_from_count_dict('2018', dl_by_pyos_2018) figure_from_count_dict('2019', dl_by_pyos_2019) figure_from_count_dict('2020', dl_by_pyos_2020) figure_from_count_dict('2021', dl_by_pyos_2021) figure_from_count_dict('2022', dl_by_pyos_2022) figure_from_count_dict('2023', dl_by_pyos_2023) figure_from_count_dict('2024', dl_by_pyos_2024) if __name__ == "__main__": main(sys.argv[1:])
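# ------------------------------------------------------------------
# Added usage sketch (hypothetical helper, not part of the original script).
# The input format is inferred from the tuple unpacking in main(): six
# whitespace-separated fields per line -- date, time, ip, version, os,
# python. All file names, IPs and values below are illustrative only.

def _write_sample_log(path="sample.log"):
    """Write a tiny log in the format main() expects, to smoke-test --unique."""
    sample = (
        "2021-03-04 12:00:01 203.0.113.7 1.4 Linux 3.8\n"
        "2021-03-04 12:05:09 203.0.113.7 1.4 Linux 3.8\n"  # duplicate ip
        "2021-03-05 08:10:00 198.51.100.2 1.4 Mac 3.9\n"
    )
    Path(path).write_text(sample)
    return path

# e.g.:  _write_sample_log(); main(['-i', 'sample.log', '-o', 'unused', '-u'])
# With --unique, the duplicate 203.0.113.7 line is counted only once thanks
# to the ip_list.append(ip) fix in the parsing loop above.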
psi4/psi4meta
download-analysis/installer/parse-psi-dl-ospy.py
Python
gpl-2.0
10,091
[ "Psi4" ]
d14171a4ad99b793939fad4d22ca0e9a5ba8f74b9a969a06efcae53044a837bc
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """This module contains a Google Cloud Vertex AI hook.""" from typing import Dict, List, Optional, Sequence, Tuple, Union from google.api_core.operation import Operation from google.api_core.retry import Retry from google.cloud.aiplatform import ( CustomContainerTrainingJob, CustomPythonPackageTrainingJob, CustomTrainingJob, datasets, models, ) from google.cloud.aiplatform_v1 import JobServiceClient, PipelineServiceClient from google.cloud.aiplatform_v1.services.job_service.pagers import ListCustomJobsPager from google.cloud.aiplatform_v1.services.pipeline_service.pagers import ( ListPipelineJobsPager, ListTrainingPipelinesPager, ) from google.cloud.aiplatform_v1.types import CustomJob, PipelineJob, TrainingPipeline from airflow import AirflowException from airflow.providers.google.common.hooks.base_google import GoogleBaseHook class CustomJobHook(GoogleBaseHook): """Hook for Google Cloud Vertex AI Custom Job APIs.""" def __init__( self, gcp_conn_id: str = "google_cloud_default", delegate_to: Optional[str] = None, impersonation_chain: Optional[Union[str, Sequence[str]]] = None, ) -> None: super().__init__( gcp_conn_id=gcp_conn_id, delegate_to=delegate_to, impersonation_chain=impersonation_chain, ) self._job: Optional[ Union[ CustomContainerTrainingJob, CustomPythonPackageTrainingJob, CustomTrainingJob, ] ] = None def get_pipeline_service_client( self, region: Optional[str] = None, ) -> PipelineServiceClient: """Returns PipelineServiceClient.""" client_options = None if region and region != 'global': client_options = {'api_endpoint': f'{region}-aiplatform.googleapis.com:443'} return PipelineServiceClient( credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options ) def get_job_service_client( self, region: Optional[str] = None, ) -> JobServiceClient: """Returns JobServiceClient""" client_options = None if region and region != 'global': client_options = {'api_endpoint': f'{region}-aiplatform.googleapis.com:443'} return JobServiceClient( credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options ) def get_custom_container_training_job( self, display_name: str, container_uri: str, command: Sequence[str] = [], model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, model_serving_container_command: Optional[Sequence[str]] = None, model_serving_container_args: Optional[Sequence[str]] = None, model_serving_container_environment_variables: Optional[Dict[str, str]] = None, model_serving_container_ports: Optional[Sequence[int]] = None, model_description: Optional[str] = None, model_instance_schema_uri: Optional[str] = None, 
model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, labels: Optional[Dict[str, str]] = None, training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, ) -> CustomContainerTrainingJob: """Returns CustomContainerTrainingJob object""" return CustomContainerTrainingJob( display_name=display_name, container_uri=container_uri, command=command, model_serving_container_image_uri=model_serving_container_image_uri, model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_serving_container_command=model_serving_container_command, model_serving_container_args=model_serving_container_args, model_serving_container_environment_variables=model_serving_container_environment_variables, model_serving_container_ports=model_serving_container_ports, model_description=model_description, model_instance_schema_uri=model_instance_schema_uri, model_parameters_schema_uri=model_parameters_schema_uri, model_prediction_schema_uri=model_prediction_schema_uri, project=project, location=location, credentials=self._get_credentials(), labels=labels, training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, staging_bucket=staging_bucket, ) def get_custom_python_package_training_job( self, display_name: str, python_package_gcs_uri: str, python_module_name: str, container_uri: str, model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, model_serving_container_command: Optional[Sequence[str]] = None, model_serving_container_args: Optional[Sequence[str]] = None, model_serving_container_environment_variables: Optional[Dict[str, str]] = None, model_serving_container_ports: Optional[Sequence[int]] = None, model_description: Optional[str] = None, model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, labels: Optional[Dict[str, str]] = None, training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, ): """Returns CustomPythonPackageTrainingJob object""" return CustomPythonPackageTrainingJob( display_name=display_name, container_uri=container_uri, python_package_gcs_uri=python_package_gcs_uri, python_module_name=python_module_name, model_serving_container_image_uri=model_serving_container_image_uri, model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_serving_container_command=model_serving_container_command, model_serving_container_args=model_serving_container_args, model_serving_container_environment_variables=model_serving_container_environment_variables, model_serving_container_ports=model_serving_container_ports, model_description=model_description, model_instance_schema_uri=model_instance_schema_uri, model_parameters_schema_uri=model_parameters_schema_uri, model_prediction_schema_uri=model_prediction_schema_uri, project=project, location=location, credentials=self._get_credentials(), 
labels=labels, training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, staging_bucket=staging_bucket, ) def get_custom_training_job( self, display_name: str, script_path: str, container_uri: str, requirements: Optional[Sequence[str]] = None, model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, model_serving_container_command: Optional[Sequence[str]] = None, model_serving_container_args: Optional[Sequence[str]] = None, model_serving_container_environment_variables: Optional[Dict[str, str]] = None, model_serving_container_ports: Optional[Sequence[int]] = None, model_description: Optional[str] = None, model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, labels: Optional[Dict[str, str]] = None, training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, ): """Returns CustomTrainingJob object""" return CustomTrainingJob( display_name=display_name, script_path=script_path, container_uri=container_uri, requirements=requirements, model_serving_container_image_uri=model_serving_container_image_uri, model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_serving_container_command=model_serving_container_command, model_serving_container_args=model_serving_container_args, model_serving_container_environment_variables=model_serving_container_environment_variables, model_serving_container_ports=model_serving_container_ports, model_description=model_description, model_instance_schema_uri=model_instance_schema_uri, model_parameters_schema_uri=model_parameters_schema_uri, model_prediction_schema_uri=model_prediction_schema_uri, project=project, location=location, credentials=self._get_credentials(), labels=labels, training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, staging_bucket=staging_bucket, ) @staticmethod def extract_model_id(obj: Dict) -> str: """Returns unique id of the Model.""" return obj["name"].rpartition("/")[-1] def wait_for_operation(self, operation: Operation, timeout: Optional[float] = None): """Waits for long-lasting operation to complete.""" try: return operation.result(timeout=timeout) except Exception: error = operation.exception(timeout=timeout) raise AirflowException(error) def cancel_job(self) -> None: """Cancel Job for training pipeline""" if self._job: self._job.cancel() def _run_job( self, job: Union[ CustomTrainingJob, CustomContainerTrainingJob, CustomPythonPackageTrainingJob, ], dataset: Optional[ Union[ datasets.ImageDataset, datasets.TabularDataset, datasets.TextDataset, datasets.VideoDataset, ] ] = None, annotation_schema_uri: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, base_output_dir: Optional[str] = None, service_account: Optional[str] = None, network: Optional[str] = None, bigquery_destination: Optional[str] = None, args: Optional[List[Union[str, float, int]]] = None, environment_variables: Optional[Dict[str, str]] = None, replica_count: int = 1, machine_type: str = "n1-standard-4", 
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED", accelerator_count: int = 0, boot_disk_type: str = "pd-ssd", boot_disk_size_gb: int = 100, training_fraction_split: Optional[float] = None, validation_fraction_split: Optional[float] = None, test_fraction_split: Optional[float] = None, training_filter_split: Optional[str] = None, validation_filter_split: Optional[str] = None, test_filter_split: Optional[str] = None, predefined_split_column_name: Optional[str] = None, timestamp_split_column_name: Optional[str] = None, tensorboard: Optional[str] = None, sync=True, ) -> models.Model: """Run Job for training pipeline""" model = job.run( dataset=dataset, annotation_schema_uri=annotation_schema_uri, model_display_name=model_display_name, model_labels=model_labels, base_output_dir=base_output_dir, service_account=service_account, network=network, bigquery_destination=bigquery_destination, args=args, environment_variables=environment_variables, replica_count=replica_count, machine_type=machine_type, accelerator_type=accelerator_type, accelerator_count=accelerator_count, boot_disk_type=boot_disk_type, boot_disk_size_gb=boot_disk_size_gb, training_fraction_split=training_fraction_split, validation_fraction_split=validation_fraction_split, test_fraction_split=test_fraction_split, training_filter_split=training_filter_split, validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=timestamp_split_column_name, tensorboard=tensorboard, sync=sync, ) if model: model.wait() return model else: raise AirflowException("Training did not produce a Managed Model returning None.") @GoogleBaseHook.fallback_to_default_project_id def cancel_pipeline_job( self, project_id: str, region: str, pipeline_job: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: """ Cancels a PipelineJob. Starts asynchronous cancellation on the PipelineJob. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the PipelineJob is not deleted; instead it becomes a pipeline with a [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to ``CANCELLED``. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param pipeline_job: The name of the PipelineJob to cancel. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. 
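
        A minimal usage sketch (the project, region and job ID below are
        hypothetical placeholders, not values taken from this provider)::

            hook = CustomJobHook()
            hook.cancel_pipeline_job(
                project_id="example-project",
                region="us-central1",
                pipeline_job="1234567890",
            )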
""" client = self.get_pipeline_service_client(region) name = client.pipeline_job_path(project_id, region, pipeline_job) client.cancel_pipeline_job( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def cancel_training_pipeline( self, project_id: str, region: str, training_pipeline: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: """ Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param training_pipeline: Required. The name of the TrainingPipeline to cancel. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) name = client.training_pipeline_path(project_id, region, training_pipeline) client.cancel_training_pipeline( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def cancel_custom_job( self, project_id: str, region: str, custom_job: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: """ Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param custom_job: Required. The name of the CustomJob to cancel. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. 
""" client = self.get_job_service_client(region) name = JobServiceClient.custom_job_path(project_id, region, custom_job) client.cancel_custom_job( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def create_pipeline_job( self, project_id: str, region: str, pipeline_job: PipelineJob, pipeline_job_id: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> PipelineJob: """ Creates a PipelineJob. A PipelineJob will run immediately when created. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param pipeline_job: Required. The PipelineJob to create. :param pipeline_job_id: The ID to use for the PipelineJob, which will become the final component of the PipelineJob name. If not provided, an ID will be automatically generated. This value should be less than 128 characters, and valid characters are /[a-z][0-9]-/. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) parent = client.common_location_path(project_id, region) result = client.create_pipeline_job( request={ 'parent': parent, 'pipeline_job': pipeline_job, 'pipeline_job_id': pipeline_job_id, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def create_training_pipeline( self, project_id: str, region: str, training_pipeline: TrainingPipeline, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> TrainingPipeline: """ Creates a TrainingPipeline. A created TrainingPipeline right away will be attempted to be run. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param training_pipeline: Required. The TrainingPipeline to create. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) parent = client.common_location_path(project_id, region) result = client.create_training_pipeline( request={ 'parent': parent, 'training_pipeline': training_pipeline, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def create_custom_job( self, project_id: str, region: str, custom_job: CustomJob, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> CustomJob: """ Creates a CustomJob. A created CustomJob right away will be attempted to be run. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param custom_job: Required. The CustomJob to create. This corresponds to the ``custom_job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. 
:param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_job_service_client(region) parent = JobServiceClient.common_location_path(project_id, region) result = client.create_custom_job( request={ 'parent': parent, 'custom_job': custom_job, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def create_custom_container_training_job( self, project_id: str, region: str, display_name: str, container_uri: str, command: Sequence[str] = [], model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, model_serving_container_command: Optional[Sequence[str]] = None, model_serving_container_args: Optional[Sequence[str]] = None, model_serving_container_environment_variables: Optional[Dict[str, str]] = None, model_serving_container_ports: Optional[Sequence[int]] = None, model_description: Optional[str] = None, model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, labels: Optional[Dict[str, str]] = None, training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, # RUN dataset: Optional[ Union[ datasets.ImageDataset, datasets.TabularDataset, datasets.TextDataset, datasets.VideoDataset, ] ] = None, annotation_schema_uri: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, base_output_dir: Optional[str] = None, service_account: Optional[str] = None, network: Optional[str] = None, bigquery_destination: Optional[str] = None, args: Optional[List[Union[str, float, int]]] = None, environment_variables: Optional[Dict[str, str]] = None, replica_count: int = 1, machine_type: str = "n1-standard-4", accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED", accelerator_count: int = 0, boot_disk_type: str = "pd-ssd", boot_disk_size_gb: int = 100, training_fraction_split: Optional[float] = None, validation_fraction_split: Optional[float] = None, test_fraction_split: Optional[float] = None, training_filter_split: Optional[str] = None, validation_filter_split: Optional[str] = None, test_filter_split: Optional[str] = None, predefined_split_column_name: Optional[str] = None, timestamp_split_column_name: Optional[str] = None, tensorboard: Optional[str] = None, sync=True, ) -> models.Model: """ Create Custom Container Training Job :param display_name: Required. The user-defined name of this TrainingPipeline. :param command: The command to be invoked when the container is started. It overrides the entrypoint instruction in Dockerfile when provided :param container_uri: Required: Uri of the training container image in the GCR. :param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI of the Model serving container suitable for serving the model produced by the training script. :param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, An HTTP path to send prediction requests to the container, and which must be supported by it. If not specified a default HTTP path will be used by Vertex AI. :param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an HTTP path to send health check requests to the container, and which must be supported by it. 
If not specified a standard HTTP path will be used by AI Platform. :param model_serving_container_command: The command with which the container is run. Not executed within a shell. The Docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. :param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. :param model_serving_container_environment_variables: The environment variables that are to be present in the container. Should be a dictionary where keys are environment variable names and values are environment variable values for those names. :param model_serving_container_ports: Declaration of ports that are exposed by the container. This field is primarily informational, it gives Vertex AI information about the network connections the container uses. Listing or not a port here has no impact on whether the port is actually exposed, any port listening on the default "0.0.0.0" address inside a container will be accessible from the network. :param model_description: The description of the Model. :param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in ``PredictRequest.instances``, ``ExplainRequest.instances`` and ``BatchPredictionJob.input_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. :param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via ``PredictRequest.parameters``, ``ExplainRequest.parameters`` and ``BatchPredictionJob.model_parameters``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform, if no parameters are supported it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. :param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via ``PredictResponse.predictions``, ``ExplainResponse.explanations``, and ``BatchPredictionJob.output_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. 
            Note: The URI given on output will be immutable and probably different,
            including the URI scheme, than the one given on input. The output URI
            will point to a location where the user only has a read access.
        :param project_id: Project to run training in.
        :param region: Location to run training in.
        :param labels: Optional. The labels with user-defined metadata to organize
            TrainingPipelines. Label keys and values can be no longer than 64
            characters, can only contain lowercase letters, numeric characters,
            underscores and dashes. International characters are allowed. See
            https://goo.gl/xmQnxf for more information and examples of labels.
        :param training_encryption_spec_key_name: Optional. The Cloud KMS resource
            identifier of the customer managed encryption key used to protect the
            training pipeline. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
            The key needs to be in the same region as where the compute resource is
            created. If set, this TrainingPipeline will be secured by this key.
            Note: Model trained by this TrainingPipeline is also secured by this
            key if ``model_to_upload`` is not set separately.
        :param model_encryption_spec_key_name: Optional. The Cloud KMS resource
            identifier of the customer managed encryption key used to protect the
            model. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
            The key needs to be in the same region as where the compute resource is
            created. If set, the trained Model will be secured by this key.
        :param staging_bucket: Bucket used to stage source and training artifacts.
        :param dataset: Vertex AI to fit this training against.
        :param annotation_schema_uri: Google Cloud Storage URI that points to a YAML
            file describing the annotation schema. The schema is defined as an
            OpenAPI 3.0.2 `Schema Object
            <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object>`__.
            Only Annotations that both match this schema and belong to DataItems not
            ignored by the split method are used in respectively training, validation
            or test role, depending on the role of the DataItem they are on. When
            used in conjunction with ``annotations_filter``, the Annotations used
            for training are filtered by both ``annotations_filter`` and
            ``annotation_schema_uri``.
        :param model_display_name: If the script produces a managed Vertex AI Model,
            the display name of the Model. The name can be up to 128 characters long
            and can consist of any UTF-8 characters. If not provided upon creation,
            the job's display_name is used.
        :param model_labels: Optional. The labels with user-defined metadata to
            organize your Models. Label keys and values can be no longer than 64
            characters, can only contain lowercase letters, numeric characters,
            underscores and dashes. International characters are allowed. See
            https://goo.gl/xmQnxf for more information and examples of labels.
        :param base_output_dir: GCS output directory of job. If not provided a
            timestamped directory in the staging directory will be used.
            Vertex AI sets the following environment variables when it runs your
            training code:

            - AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving
              model artifacts, i.e. <base_output_dir>/model/
            - AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for
              saving checkpoints, i.e. <base_output_dir>/checkpoints/
            - AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended
              for saving TensorBoard logs, i.e. <base_output_dir>/logs/
        :param service_account: Specifies the service account for workload run-as
            account. Users submitting jobs must have act-as permission on this
            run-as account.
        :param network: The full name of the Compute Engine network to which the job
            should be peered. Private services access must already be configured for
            the network. If left unspecified, the job is not peered with any network.
        :param bigquery_destination: Provide this field if `dataset` is a BigQuery
            dataset. The BigQuery project location where the training data is to be
            written to. In the given project a new dataset is created with name
            ``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
            where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training
            input data will be written into that dataset. In the dataset three
            tables will be created, ``training``, ``validation`` and ``test``.

            - AIP_DATA_FORMAT = "bigquery"
            - AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
            - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
            - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
        :param args: Command line arguments to be passed to the Python script.
        :param environment_variables: Environment variables to be passed to the
            container. Should be a dictionary where keys are environment variable
            names and values are environment variable values for those names. At
            most 10 environment variables can be specified. The name of the
            environment variable must be unique.
        :param replica_count: The number of worker replicas. If replica_count = 1
            then one chief replica will be provisioned. If replica_count > 1 the
            remainder will be provisioned as a worker replica pool.
        :param machine_type: The type of machine to use for training.
        :param accelerator_type: Hardware accelerator type. One of
            ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
            NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
        :param accelerator_count: The number of accelerators to attach to a worker
            replica.
        :param boot_disk_type: Type of the boot disk, default is `pd-ssd`. Valid
            values: `pd-ssd` (Persistent Disk Solid State Drive) or `pd-standard`
            (Persistent Disk Hard Disk Drive).
        :param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
            Boot disk size must be within the range of [100, 64000].
        :param training_fraction_split: Optional. The fraction of the input data
            that is to be used to train the Model. This is ignored if Dataset is not
            provided.
        :param validation_fraction_split: Optional. The fraction of the input data
            that is to be used to validate the Model. This is ignored if Dataset is
            not provided.
        :param test_fraction_split: Optional. The fraction of the input data that is
            to be used to evaluate the Model. This is ignored if Dataset is not
            provided.
        :param training_filter_split: Optional. A filter on DataItems of the
            Dataset. DataItems that match this filter are used to train the Model.
            A filter with same syntax as the one used in
            DatasetService.ListDataItems may be used. If a single DataItem is
            matched by more than one of the FilterSplit filters, then it is assigned
            to the first set that applies to it in the training, validation, test
            order. This is ignored if Dataset is not provided.
        :param validation_filter_split: Optional. A filter on DataItems of the
            Dataset. DataItems that match this filter are used to validate the
            Model. A filter with same syntax as the one used in
            DatasetService.ListDataItems may be used. If a single DataItem is
            matched by more than one of the FilterSplit filters, then it is assigned
            to the first set that applies to it in the training, validation, test
            order. This is ignored if Dataset is not provided.
        :param test_filter_split: Optional. A filter on DataItems of the Dataset.
            DataItems that match this filter are used to test the Model. A filter
            with same syntax as the one used in DatasetService.ListDataItems may be
            used. If a single DataItem is matched by more than one of the
            FilterSplit filters, then it is assigned to the first set that applies
            to it in the training, validation, test order. This is ignored if
            Dataset is not provided.
        :param predefined_split_column_name: Optional. The key is a name of one of
            the Dataset's data columns. The value of the key (either the label's
            value or value in the column) must be one of {``training``,
            ``validation``, ``test``}, and it defines to which set the given piece
            of data is assigned. If for a piece of data the key is not present or
            has an invalid value, that piece is ignored by the pipeline. Supported
            only for tabular and time series Datasets.
        :param timestamp_split_column_name: Optional. The key is a name of one of
            the Dataset's data columns. The values of the key (the values in the
            column) must be in RFC 3339 `date-time` format, where `time-offset` =
            `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is
            not present or has an invalid value, that piece is ignored by the
            pipeline. Supported only for tabular and time series Datasets.
        :param tensorboard: Optional. The name of a Vertex AI resource to which this
            CustomJob will upload logs.
            Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
            For more information on configuring your service account please visit:
            https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
        :param sync: Whether to execute the AI Platform job synchronously. If False,
            this method will be executed in concurrent Future and any downstream
            object will be immediately returned and synced when the Future has
            completed.
""" self._job = self.get_custom_container_training_job( project=project_id, location=region, display_name=display_name, container_uri=container_uri, command=command, model_serving_container_image_uri=model_serving_container_image_uri, model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_serving_container_command=model_serving_container_command, model_serving_container_args=model_serving_container_args, model_serving_container_environment_variables=model_serving_container_environment_variables, model_serving_container_ports=model_serving_container_ports, model_description=model_description, model_instance_schema_uri=model_instance_schema_uri, model_parameters_schema_uri=model_parameters_schema_uri, model_prediction_schema_uri=model_prediction_schema_uri, labels=labels, training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, staging_bucket=staging_bucket, ) if not self._job: raise AirflowException("CustomJob was not created") model = self._run_job( job=self._job, dataset=dataset, annotation_schema_uri=annotation_schema_uri, model_display_name=model_display_name, model_labels=model_labels, base_output_dir=base_output_dir, service_account=service_account, network=network, bigquery_destination=bigquery_destination, args=args, environment_variables=environment_variables, replica_count=replica_count, machine_type=machine_type, accelerator_type=accelerator_type, accelerator_count=accelerator_count, boot_disk_type=boot_disk_type, boot_disk_size_gb=boot_disk_size_gb, training_fraction_split=training_fraction_split, validation_fraction_split=validation_fraction_split, test_fraction_split=test_fraction_split, training_filter_split=training_filter_split, validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=timestamp_split_column_name, tensorboard=tensorboard, sync=sync, ) return model @GoogleBaseHook.fallback_to_default_project_id def create_custom_python_package_training_job( self, project_id: str, region: str, display_name: str, python_package_gcs_uri: str, python_module_name: str, container_uri: str, model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, model_serving_container_command: Optional[Sequence[str]] = None, model_serving_container_args: Optional[Sequence[str]] = None, model_serving_container_environment_variables: Optional[Dict[str, str]] = None, model_serving_container_ports: Optional[Sequence[int]] = None, model_description: Optional[str] = None, model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, labels: Optional[Dict[str, str]] = None, training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, # RUN dataset: Optional[ Union[ datasets.ImageDataset, datasets.TabularDataset, datasets.TextDataset, datasets.VideoDataset, ] ] = None, annotation_schema_uri: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, base_output_dir: Optional[str] = None, service_account: Optional[str] = None, network: Optional[str] = None, 
bigquery_destination: Optional[str] = None, args: Optional[List[Union[str, float, int]]] = None, environment_variables: Optional[Dict[str, str]] = None, replica_count: int = 1, machine_type: str = "n1-standard-4", accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED", accelerator_count: int = 0, boot_disk_type: str = "pd-ssd", boot_disk_size_gb: int = 100, training_fraction_split: Optional[float] = None, validation_fraction_split: Optional[float] = None, test_fraction_split: Optional[float] = None, training_filter_split: Optional[str] = None, validation_filter_split: Optional[str] = None, test_filter_split: Optional[str] = None, predefined_split_column_name: Optional[str] = None, timestamp_split_column_name: Optional[str] = None, tensorboard: Optional[str] = None, sync=True, ) -> models.Model: """ Create Custom Python Package Training Job :param display_name: Required. The user-defined name of this TrainingPipeline. :param python_package_gcs_uri: Required: GCS location of the training python package. :param python_module_name: Required: The module name of the training python package. :param container_uri: Required: Uri of the training container image in the GCR. :param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI of the Model serving container suitable for serving the model produced by the training script. :param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, An HTTP path to send prediction requests to the container, and which must be supported by it. If not specified a default HTTP path will be used by Vertex AI. :param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an HTTP path to send health check requests to the container, and which must be supported by it. If not specified a standard HTTP path will be used by AI Platform. :param model_serving_container_command: The command with which the container is run. Not executed within a shell. The Docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. :param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. :param model_serving_container_environment_variables: The environment variables that are to be present in the container. Should be a dictionary where keys are environment variable names and values are environment variable values for those names. :param model_serving_container_ports: Declaration of ports that are exposed by the container. This field is primarily informational, it gives Vertex AI information about the network connections the container uses. Listing or not a port here has no impact on whether the port is actually exposed, any port listening on the default "0.0.0.0" address inside a container will be accessible from the network. 
:param model_description: The description of the Model. :param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in ``PredictRequest.instances``, ``ExplainRequest.instances`` and ``BatchPredictionJob.input_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. :param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via ``PredictRequest.parameters``, ``ExplainRequest.parameters`` and ``BatchPredictionJob.model_parameters``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform, if no parameters are supported it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. :param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via ``PredictResponse.predictions``, ``ExplainResponse.explanations``, and ``BatchPredictionJob.output_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. :param project_id: Project to run training in. :param region: Location to run training in. :param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. :param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload`` is not set separately. :param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. :param staging_bucket: Bucket used to stage source and training artifacts. :param dataset: Vertex AI to fit this training against. 
:param annotation_schema_uri: Google Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object] (https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in, respectively, the training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with ``annotations_filter``, the Annotations used for training are filtered by both ``annotations_filter`` and ``annotation_schema_uri``. :param model_display_name: If the script produces a managed Vertex AI Model, the display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used. :param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. :param base_output_dir: GCS output directory of job. If not provided, a timestamped directory in the staging directory will be used. Vertex AI sets the following environment variables when it runs your training code: - AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/ - AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/ - AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/ :param service_account: Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. :param network: The full name of the Compute Engine network to which the job should be peered. Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. :param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset. The BigQuery project location where the training data is to be written to. In the given project a new dataset is created with name ``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`` where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training input data will be written into that dataset. In the dataset three tables will be created: ``training``, ``validation`` and ``test``. - AIP_DATA_FORMAT = "bigquery". - AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training" - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" :param args: Command line arguments to be passed to the Python script. :param environment_variables: Environment variables to be passed to the container. Should be a dictionary where keys are environment variable names and values are environment variable values for those names. At most 10 environment variables can be specified. The name of each environment variable must be unique. :param replica_count: The number of worker replicas. If replica_count = 1, then one chief replica will be provisioned. If replica_count > 1, the remainder will be provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training. :param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 :param accelerator_count: The number of accelerators to attach to a worker replica. :param boot_disk_type: Type of the boot disk, default is `pd-ssd`. Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or `pd-standard` (Persistent Disk Hard Disk Drive). :param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB. Boot disk size must be within the range of [100, 64000]. :param training_fraction_split: Optional. The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided. :param validation_fraction_split: Optional. The fraction of the input data that is to be used to validate the Model. This is ignored if Dataset is not provided. :param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided. :param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. :param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. :param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. :param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {``training``, ``validation``, ``test``}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets. :param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets. :param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload logs.
Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training :param sync: Whether to execute the AI Platform job synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. """ self._job = self.get_custom_python_package_training_job( project=project_id, location=region, display_name=display_name, python_package_gcs_uri=python_package_gcs_uri, python_module_name=python_module_name, container_uri=container_uri, model_serving_container_image_uri=model_serving_container_image_uri, model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_serving_container_command=model_serving_container_command, model_serving_container_args=model_serving_container_args, model_serving_container_environment_variables=model_serving_container_environment_variables, model_serving_container_ports=model_serving_container_ports, model_description=model_description, model_instance_schema_uri=model_instance_schema_uri, model_parameters_schema_uri=model_parameters_schema_uri, model_prediction_schema_uri=model_prediction_schema_uri, labels=labels, training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, staging_bucket=staging_bucket, ) if not self._job: raise AirflowException("CustomJob was not created") model = self._run_job( job=self._job, dataset=dataset, annotation_schema_uri=annotation_schema_uri, model_display_name=model_display_name, model_labels=model_labels, base_output_dir=base_output_dir, service_account=service_account, network=network, bigquery_destination=bigquery_destination, args=args, environment_variables=environment_variables, replica_count=replica_count, machine_type=machine_type, accelerator_type=accelerator_type, accelerator_count=accelerator_count, boot_disk_type=boot_disk_type, boot_disk_size_gb=boot_disk_size_gb, training_fraction_split=training_fraction_split, validation_fraction_split=validation_fraction_split, test_fraction_split=test_fraction_split, training_filter_split=training_filter_split, validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=timestamp_split_column_name, tensorboard=tensorboard, sync=sync, ) return model @GoogleBaseHook.fallback_to_default_project_id def create_custom_training_job( self, project_id: str, region: str, display_name: str, script_path: str, container_uri: str, requirements: Optional[Sequence[str]] = None, model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, model_serving_container_command: Optional[Sequence[str]] = None, model_serving_container_args: Optional[Sequence[str]] = None, model_serving_container_environment_variables: Optional[Dict[str, str]] = None, model_serving_container_ports: Optional[Sequence[int]] = None, model_description: Optional[str] = None, model_instance_schema_uri: Optional[str] = None, model_parameters_schema_uri: Optional[str] = None, model_prediction_schema_uri: Optional[str] = None, labels: Optional[Dict[str, str]] = None, 
training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, # RUN dataset: Optional[ Union[ datasets.ImageDataset, datasets.TabularDataset, datasets.TextDataset, datasets.VideoDataset, ] ] = None, annotation_schema_uri: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, base_output_dir: Optional[str] = None, service_account: Optional[str] = None, network: Optional[str] = None, bigquery_destination: Optional[str] = None, args: Optional[List[Union[str, float, int]]] = None, environment_variables: Optional[Dict[str, str]] = None, replica_count: int = 1, machine_type: str = "n1-standard-4", accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED", accelerator_count: int = 0, boot_disk_type: str = "pd-ssd", boot_disk_size_gb: int = 100, training_fraction_split: Optional[float] = None, validation_fraction_split: Optional[float] = None, test_fraction_split: Optional[float] = None, training_filter_split: Optional[str] = None, validation_filter_split: Optional[str] = None, test_filter_split: Optional[str] = None, predefined_split_column_name: Optional[str] = None, timestamp_split_column_name: Optional[str] = None, tensorboard: Optional[str] = None, sync=True, ) -> models.Model: """ Create a Custom Training Job. :param display_name: Required. The user-defined name of this TrainingPipeline. :param script_path: Required. Local path to training script. :param container_uri: Required. URI of the training container image in GCR. :param requirements: List of python package dependencies of the script. :param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI of the Model serving container suitable for serving the model produced by the training script. :param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an HTTP path to send prediction requests to the container, and which must be supported by it. If not specified, a default HTTP path will be used by Vertex AI. :param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an HTTP path to send health check requests to the container, and which must be supported by it. If not specified, a standard HTTP path will be used by AI Platform. :param model_serving_container_command: The command with which the container is run. Not executed within a shell. The Docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e.: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. :param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e.: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. :param model_serving_container_environment_variables: The environment variables that are to be present in the container.
Should be a dictionary where keys are environment variable names and values are environment variable values for those names. :param model_serving_container_ports: Declaration of ports that are exposed by the container. This field is primarily informational; it gives Vertex AI information about the network connections the container uses. Whether or not a port is listed here has no impact on whether the port is actually exposed; any port listening on the default "0.0.0.0" address inside a container will be accessible from the network. :param model_description: The description of the Model. :param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which is used in ``PredictRequest.instances``, ``ExplainRequest.instances`` and ``BatchPredictionJob.input_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access. :param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via ``PredictRequest.parameters``, ``ExplainRequest.parameters`` and ``BatchPredictionJob.model_parameters``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform; if no parameters are supported, it is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access. :param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which is returned via ``PredictResponse.predictions``, ``ExplainResponse.explanations``, and ``BatchPredictionJob.output_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://tinyurl.com/y538mdwt#schema-object>`__. AutoML Models always have this field populated by AI Platform. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has read access. :param project_id: Project to run training in. :param region: Location to run training in. :param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. :param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. :param staging_bucket: Bucket used to stage source and training artifacts. :param dataset: The Vertex AI dataset to fit this training against. :param annotation_schema_uri: Google Cloud Storage URI that points to a YAML file describing the annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema Object] (https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in, respectively, the training, validation or test role, depending on the role of the DataItem they are on. When used in conjunction with ``annotations_filter``, the Annotations used for training are filtered by both ``annotations_filter`` and ``annotation_schema_uri``. :param model_display_name: If the script produces a managed Vertex AI Model, the display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used. :param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. :param base_output_dir: GCS output directory of job. If not provided, a timestamped directory in the staging directory will be used. Vertex AI sets the following environment variables when it runs your training code: - AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/ - AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/ - AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/ :param service_account: Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. :param network: The full name of the Compute Engine network to which the job should be peered. Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. :param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset. The BigQuery project location where the training data is to be written to. In the given project a new dataset is created with name ``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`` where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All training input data will be written into that dataset. In the dataset three tables will be created: ``training``, ``validation`` and ``test``. - AIP_DATA_FORMAT = "bigquery". - AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training" - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" :param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container. Should be a dictionary where keys are environment variable names and values are environment variable values for those names. At most 10 environment variables can be specified. The name of each environment variable must be unique. :param replica_count: The number of worker replicas. If replica_count = 1, then one chief replica will be provisioned. If replica_count > 1, the remainder will be provisioned as a worker replica pool. :param machine_type: The type of machine to use for training. :param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 :param accelerator_count: The number of accelerators to attach to a worker replica. :param boot_disk_type: Type of the boot disk, default is `pd-ssd`. Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or `pd-standard` (Persistent Disk Hard Disk Drive). :param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB. Boot disk size must be within the range of [100, 64000]. :param training_fraction_split: Optional. The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided. :param validation_fraction_split: Optional. The fraction of the input data that is to be used to validate the Model. This is ignored if Dataset is not provided. :param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided. :param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. :param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. :param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. :param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {``training``, ``validation``, ``test``}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets. :param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data columns.
The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets. :param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload logs. Format: ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training :param sync: Whether to execute the AI Platform job synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. """ self._job = self.get_custom_training_job( project=project_id, location=region, display_name=display_name, script_path=script_path, container_uri=container_uri, requirements=requirements, model_serving_container_image_uri=model_serving_container_image_uri, model_serving_container_predict_route=model_serving_container_predict_route, model_serving_container_health_route=model_serving_container_health_route, model_serving_container_command=model_serving_container_command, model_serving_container_args=model_serving_container_args, model_serving_container_environment_variables=model_serving_container_environment_variables, model_serving_container_ports=model_serving_container_ports, model_description=model_description, model_instance_schema_uri=model_instance_schema_uri, model_parameters_schema_uri=model_parameters_schema_uri, model_prediction_schema_uri=model_prediction_schema_uri, labels=labels, training_encryption_spec_key_name=training_encryption_spec_key_name, model_encryption_spec_key_name=model_encryption_spec_key_name, staging_bucket=staging_bucket, ) if not self._job: raise AirflowException("CustomJob was not created") model = self._run_job( job=self._job, dataset=dataset, annotation_schema_uri=annotation_schema_uri, model_display_name=model_display_name, model_labels=model_labels, base_output_dir=base_output_dir, service_account=service_account, network=network, bigquery_destination=bigquery_destination, args=args, environment_variables=environment_variables, replica_count=replica_count, machine_type=machine_type, accelerator_type=accelerator_type, accelerator_count=accelerator_count, boot_disk_type=boot_disk_type, boot_disk_size_gb=boot_disk_size_gb, training_fraction_split=training_fraction_split, validation_fraction_split=validation_fraction_split, test_fraction_split=test_fraction_split, training_filter_split=training_filter_split, validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=timestamp_split_column_name, tensorboard=tensorboard, sync=sync, ) return model @GoogleBaseHook.fallback_to_default_project_id def delete_pipeline_job( self, project_id: str, region: str, pipeline_job: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Operation: """ Deletes a PipelineJob. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param pipeline_job: Required. The name of the PipelineJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) name = client.pipeline_job_path(project_id, region, pipeline_job) result = client.delete_pipeline_job( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def delete_training_pipeline( self, project_id: str, region: str, training_pipeline: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Operation: """ Deletes a TrainingPipeline. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param training_pipeline: Required. The name of the TrainingPipeline resource to be deleted. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) name = client.training_pipeline_path(project_id, region, training_pipeline) result = client.delete_training_pipeline( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def delete_custom_job( self, project_id: str, region: str, custom_job: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Operation: """ Deletes a CustomJob. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param custom_job: Required. The name of the CustomJob to delete. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_job_service_client(region) name = client.custom_job_path(project_id, region, custom_job) result = client.delete_custom_job( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def get_pipeline_job( self, project_id: str, region: str, pipeline_job: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> PipelineJob: """ Gets a PipelineJob. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param pipeline_job: Required. The name of the PipelineJob resource. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. 
""" client = self.get_pipeline_service_client(region) name = client.pipeline_job_path(project_id, region, pipeline_job) result = client.get_pipeline_job( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def get_training_pipeline( self, project_id: str, region: str, training_pipeline: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> TrainingPipeline: """ Gets a TrainingPipeline. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param training_pipeline: Required. The name of the TrainingPipeline resource. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) name = client.training_pipeline_path(project_id, region, training_pipeline) result = client.get_training_pipeline( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def get_custom_job( self, project_id: str, region: str, custom_job: str, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> CustomJob: """ Gets a CustomJob. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param custom_job: Required. The name of the CustomJob to get. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_job_service_client(region) name = JobServiceClient.custom_job_path(project_id, region, custom_job) result = client.get_custom_job( request={ 'name': name, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def list_pipeline_jobs( self, project_id: str, region: str, page_size: Optional[int] = None, page_token: Optional[str] = None, filter: Optional[str] = None, order_by: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> ListPipelineJobsPager: """ Lists PipelineJobs in a Location. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param filter: Optional. Lists the PipelineJobs that match the filter expression. The following fields are supported: - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - ``display_name``: Supports ``=``, ``!=`` comparisons, and ``:`` wildcard. - ``pipeline_job_user_id``: Supports ``=``, ``!=`` comparisons, and ``:`` wildcard. for example, can check if pipeline's display_name contains *step* by doing display_name:"*step*" - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. 
- ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. - ``labels``: Supports key-value equality and key presence. Filter expressions can be combined using logical operators (``AND`` & ``OR``). For example: ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. The syntax to define filter expression is based on https://google.aip.dev/160. :param page_size: Optional. The standard list page size. :param page_token: Optional. The standard list page token. Typically obtained via [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] of the previous [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] call. :param order_by: Optional. A comma-separated list of fields to order by. The default sort order is ascending. Use "desc" after a field name for descending. You can provide multiple order_by fields, e.g. "create_time desc, end_time", "end_time, start_time, update_time". For example, using "create_time desc, end_time" will order results by create time in descending order, and if there are multiple jobs having the same create time, order them by the end time in ascending order. If order_by is not specified, the default order is create time in descending order. Supported fields: - ``create_time`` - ``update_time`` - ``end_time`` - ``start_time`` :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) parent = client.common_location_path(project_id, region) result = client.list_pipeline_jobs( request={ 'parent': parent, 'page_size': page_size, 'page_token': page_token, 'filter': filter, 'order_by': order_by, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def list_training_pipelines( self, project_id: str, region: str, page_size: Optional[int] = None, page_token: Optional[str] = None, filter: Optional[str] = None, read_mask: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> ListTrainingPipelinesPager: """ Lists TrainingPipelines in a Location. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param filter: Optional. The standard list filter. Supported fields: - ``display_name`` supports = and !=. - ``state`` supports = and !=. Some examples of using the filter are: - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - ``NOT display_name="my_pipeline"`` - ``state="PIPELINE_STATE_FAILED"`` :param page_size: Optional. The standard list page size. :param page_token: Optional. The standard list page token. Typically obtained via [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] of the previous [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] call. :param read_mask: Optional. Mask specifying which fields to read. :param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_pipeline_service_client(region) parent = client.common_location_path(project_id, region) result = client.list_training_pipelines( request={ 'parent': parent, 'page_size': page_size, 'page_token': page_token, 'filter': filter, 'read_mask': read_mask, }, retry=retry, timeout=timeout, metadata=metadata, ) return result @GoogleBaseHook.fallback_to_default_project_id def list_custom_jobs( self, project_id: str, region: str, page_size: Optional[int], page_token: Optional[str], filter: Optional[str], read_mask: Optional[str], retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> ListCustomJobsPager: """ Lists CustomJobs in a Location. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param filter: Optional. The standard list filter. Supported fields: - ``display_name`` supports = and !=. - ``state`` supports = and !=. Some examples of using the filter are: - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - ``NOT display_name="my_pipeline"`` - ``state="PIPELINE_STATE_FAILED"`` :param page_size: Optional. The standard list page size. :param page_token: Optional. The standard list page token. Typically obtained via [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] of the previous [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] call. :param read_mask: Optional. Mask specifying which fields to read. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. """ client = self.get_job_service_client(region) parent = JobServiceClient.common_location_path(project_id, region) result = client.list_custom_jobs( request={ 'parent': parent, 'page_size': page_size, 'page_token': page_token, 'filter': filter, 'read_mask': read_mask, }, retry=retry, timeout=timeout, metadata=metadata, ) return result
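# Illustrative usage sketch for the listing and get methods above. Assumptions
# not taken from this module: the enclosing hook class is named
# ``CustomJobHook``, and an Airflow connection ``google_cloud_default`` with
# valid Google Cloud credentials exists; the project id, region, and resource
# ids below are placeholders.
if __name__ == "__main__":
    hook = CustomJobHook(gcp_conn_id="google_cloud_default")  # assumed class name
    # Page through TrainingPipelines that finished successfully.
    succeeded = hook.list_training_pipelines(
        project_id="my-project",  # placeholder project id
        region="us-central1",
        filter='state="PIPELINE_STATE_SUCCEEDED"',
    )
    for pipeline in succeeded:
        print(pipeline.display_name)
    # Fetch a single PipelineJob by its resource id and inspect its state.
    job = hook.get_pipeline_job(
        project_id="my-project",
        region="us-central1",
        pipeline_job="my-pipeline-job-id",  # placeholder resource id
    )
    print(job.state)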
Acehaidrey/incubator-airflow
airflow/providers/google/cloud/hooks/vertex_ai/custom_job.py
Python
apache-2.0
108,824
[ "VisIt" ]
020c904e4691ead95cbc391fa7463e0929d7d90a830528abbe6d107cc1799bdb
# Bar Learning example # # authors: Julien Vitay, Helge Uelo Dinkelbach from ANNarchy import * setup(paradigm="cuda") # Input neuron: r is set externally InputNeuron = Neuron(parameters="r = 0.0") # Leaky neuron LeakyNeuron = Neuron( parameters=""" tau = 10.0 : population """, equations=""" tau * dr/dt + r = sum(exc) - sum(inh) : min=0.0 """ ) # Oja synapse Oja = Synapse( parameters=""" tau = 2000.0 : postsynaptic alpha = 8.0 : postsynaptic min_w = 0.0 : postsynaptic """, equations=""" tau * dw/dt = pre.r * post.r - alpha * post.r^2 * w : min=min_w """ ) # Creating the populations Input = Population(geometry=(8, 8), neuron=InputNeuron) Feature = Population(geometry=(8, 4), neuron=LeakyNeuron) # Creating the projections ff = Projection( pre=Input, post=Feature, target='exc', synapse = Oja ) ff.connect_all_to_all(weights = Uniform(-0.5, 0.5), storage_format="csr") ff.min_w = -10.0 lat = Projection( pre=Feature, post=Feature, target='inh', synapse = Oja ) lat.connect_all_to_all(weights = Uniform(0.0, 1.0), storage_format="csr") lat.alpha = 0.3 # every 200 trials we update # the receptive fields period = 200 count = 0 # Definition of the environment def trial(): global count count+=1 # Reset the firing rate for all neurons Input.r = 0.0 # Clamp horizontal bars randomly for h in range(Input.geometry[0]): if np.random.random() < 1.0/ float(Input.geometry[0]): Input[h, :].r = 1.0 # Clamp vertical bars randomly for w in range(Input.geometry[1]): if np.random.random() < 1.0/ float(Input.geometry[1]): Input[:, w].r = 1.0 # Simulate for 50ms simulate(50.) # Return firing rates and receptive fields if count < period: return Input.r, Feature.r, None else: count = 0 return Input.r, Feature.r, ff.receptive_fields() if __name__=='__main__': compile() # Create and launch the GUI from Viz import Viewer view = Viewer(func=trial) view.run()
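# Optional headless variant: a sketch built only from the definitions above,
# for running the bar-learning loop without the GUI. The function name and the
# default trial count are illustrative additions, not part of the original example.
def run_headless(n_trials=5 * period):
    """Train for n_trials trials without the viewer and return the receptive fields."""
    compile()
    for _ in range(n_trials):
        inputs, features, rfs = trial()
    return ff.receptive_fields()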
vitay/ANNarchy
examples/bar_learning/BarLearningGPU.py
Python
gpl-2.0
2,153
[ "NEURON" ]
f5bc90a4a1e4acd4af7af4bbd3352092082accb4cdcba030e9935569a034ea56
import math as m import warnings import numpy as nu from scipy import integrate import galpy.util.bovy_plot as plot import galpy.util.bovy_symplecticode as symplecticode from galpy.util.bovy_conversion import physical_conversion from galpy.orbit_src.OrbitTop import OrbitTop from galpy.potential_src.planarPotential import evaluateplanarRforces,\ RZToplanarPotential, evaluateplanarphiforces,\ evaluateplanarPotentials from galpy.potential_src.Potential import Potential from galpy.util import galpyWarning #try: from galpy.orbit_src.integratePlanarOrbit import integratePlanarOrbit_c,\ integratePlanarOrbit_dxdv_c, _ext_loaded ext_loaded= _ext_loaded class planarOrbitTop(OrbitTop): """Top-level class representing a planar orbit (i.e., one in the plane of a galaxy)""" def __init__(self,vxvv=None,vo=220.,ro=8.0,zo=0.025, solarmotion=nu.array([-10.1,4.0,6.7])): #pragma: no cover (never used) """ NAME: __init__ PURPOSE: Initialize a planar orbit INPUT: vxvv - [R,vR,vT(,phi)] vo - circular velocity at ro (km/s) ro - distance from vantage point to GC (kpc) zo - offset toward the NGP of the Sun wrt the plane (kpc) solarmotion - value in [-U,V,W] (km/s) OUTPUT: HISTORY: 2010-07-12 - Written - Bovy (NYU) 2014-06-11 - Added conversion kwargs to physical coordinates - Bovy (IAS) """ OrbitTop.__init__(self,vxvv=vxvv, ro=ro,zo=zo,vo=vo,solarmotion=solarmotion) return None def e(self,analytic=False,pot=None): """ NAME: e PURPOSE: calculate the eccentricity INPUT: analytic - compute this analytically pot - potential to use for analytical calculation OUTPUT: eccentricity HISTORY: 2010-09-15 - Written - Bovy (NYU) """ if analytic: self._setupaA(pot=pot,type='adiabatic') (rperi,rap)= self._aA.calcRapRperi(self) return (rap-rperi)/(rap+rperi) if not hasattr(self,'orbit'): raise AttributeError("Integrate the orbit first") if not hasattr(self,'rs'): self.rs= self.orbit[:,0] return (nu.amax(self.rs)-nu.amin(self.rs))/(nu.amax(self.rs)+nu.amin(self.rs)) @physical_conversion('energy') def Jacobi(self,*args,**kwargs): """ NAME: Jacobi PURPOSE: calculate the Jacobi integral of the motion INPUT: t - (optional) time at which to get the radius OmegaP= pattern speed of rotating frame pot= potential instance or list of such instances OUTPUT: Jacobi integral HISTORY: 2011-04-18 - Written - Bovy (NYU) """ if not 'OmegaP' in kwargs or kwargs['OmegaP'] is None: OmegaP= 1. 
if not 'pot' in kwargs or kwargs['pot'] is None: try: pot= self._pot except AttributeError: raise AttributeError("Integrate orbit or specify pot=") else: pot= kwargs['pot'] if isinstance(pot,list): for p in pot: if hasattr(p,'OmegaP'): OmegaP= p.OmegaP() break else: if hasattr(pot,'OmegaP'): OmegaP= pot.OmegaP() kwargs.pop('OmegaP',None) else: OmegaP= kwargs.pop('OmegaP') #Make sure you are not using physical coordinates old_physical= kwargs.get('use_physical',None) kwargs['use_physical']= False out= self.E(*args,**kwargs)-OmegaP*self.L(*args,**kwargs) if not old_physical is None: kwargs['use_physical']= old_physical else: kwargs.pop('use_physical') return out @physical_conversion('position') def rap(self,analytic=False,pot=None,**kwargs): """ NAME: rap PURPOSE: return the apocenter radius INPUT: analytic - compute this analytically pot - potential to use for analytical calculation OUTPUT: R_ap HISTORY: 2010-09-20 - Written - Bovy (NYU) """ if analytic: self._setupaA(pot=pot,type='adiabatic') (rperi,rap)= self._aA.calcRapRperi(self) return rap if not hasattr(self,'orbit'): raise AttributeError("Integrate the orbit first") if not hasattr(self,'rs'): self.rs= self.orbit[:,0] return nu.amax(self.rs) @physical_conversion('position') def rperi(self,analytic=False,pot=None,**kwargs): """ NAME: rperi PURPOSE: return the pericenter radius INPUT: analytic - compute this analytically pot - potential to use for analytical calculation OUTPUT: R_peri HISTORY: 2010-09-20 - Written - Bovy (NYU) """ if analytic: self._setupaA(pot=pot,type='adiabatic') (rperi,rap)= self._aA.calcRapRperi(self) return rperi if not hasattr(self,'orbit'): raise AttributeError("Integrate the orbit first") if not hasattr(self,'rs'): self.rs= self.orbit[:,0] return nu.amin(self.rs) @physical_conversion('position') def zmax(self,pot=None,analytic=False,**kwargs): raise AttributeError("planarOrbit does not have a zmax") class planarROrbit(planarOrbitTop): """Class representing a planar orbit, without \phi. Useful for orbit-integration in axisymmetric potentials when you don't care about the azimuth""" def __init__(self,vxvv=[1.,0.,1.],vo=220.,ro=8.0,zo=0.025, solarmotion=nu.array([-10.1,4.0,6.7])): """ NAME: __init__ PURPOSE: Initialize a planarROrbit INPUT: vxvv - [R,vR,vT] vo - circular velocity at ro (km/s) ro - distance from vantage point to GC (kpc) zo - offset toward the NGP of the Sun wrt the plane (kpc) solarmotion - value in [-U,V,W] (km/s) OUTPUT: HISTORY: 2010-07-12 - Written - Bovy (NYU) 2014-06-11 - Added conversion kwargs to physical coordinates - Bovy (IAS) """ OrbitTop.__init__(self,vxvv=vxvv, ro=ro,zo=zo,vo=vo,solarmotion=solarmotion) return None def integrate(self,t,pot,method='symplec4_c',dt=None): """ NAME: integrate PURPOSE: integrate the orbit INPUT: t - list of times at which to output (0 has to be in this!) 
pot - potential instance or list of instances method= 'odeint' for scipy's odeint 'leapfrog' for a simple leapfrog implementation 'leapfrog_c' for a simple leapfrog implementation in C 'rk4_c' for a 4th-order Runge-Kutta integrator in C 'rk6_c' for a 6th-order Runge-Kutta integrator in C 'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest) dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize OUTPUT: error message number (get the actual orbit using getOrbit()) HISTORY: 2010-07-20 """ if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp') if hasattr(self,'rs'): delattr(self,'rs') thispot= RZToplanarPotential(pot) self.t= nu.array(t) self._pot= thispot self.orbit, msg= _integrateROrbit(self.vxvv,thispot,t,method,dt) return msg @physical_conversion('energy') def E(self,*args,**kwargs): """ NAME: E PURPOSE: calculate the energy INPUT: t - (optional) time at which to get the energy pot= potential instance or list of such instances OUTPUT: energy HISTORY: 2010-09-15 - Written - Bovy (NYU) 2011-04-18 - Added t - Bovy (NYU) """ if not 'pot' in kwargs or kwargs['pot'] is None: try: pot= self._pot except AttributeError: raise AttributeError("Integrate orbit or specify pot=") if 'pot' in kwargs and kwargs['pot'] is None: kwargs.pop('pot') else: pot= kwargs.pop('pot') if isinstance(pot,Potential): thispot= RZToplanarPotential(pot) elif isinstance(pot,list): thispot= [] for p in pot: if isinstance(p,Potential): thispot.append(RZToplanarPotential(p)) else: thispot.append(p) else: thispot= pot if len(args) > 0: t= args[0] else: t= 0. #Get orbit thiso= self(*args,**kwargs) onet= (len(thiso.shape) == 1) if onet: return evaluateplanarPotentials(thiso[0],thispot, t=t)\ +thiso[1]**2./2.\ +thiso[2]**2./2. else: return nu.array([evaluateplanarPotentials(thiso[0,ii],thispot, t=t[ii])\ +thiso[1,ii]**2./2.\ +thiso[2,ii]**2./2. for ii in range(len(t))]) class planarOrbit(planarOrbitTop): """Class representing a full planar orbit (R,vR,vT,phi)""" def __init__(self,vxvv=[1.,0.,1.,0.],vo=220.,ro=8.0,zo=0.025, solarmotion=nu.array([-10.1,4.0,6.7])): """ NAME: __init__ PURPOSE: Initialize a planarOrbit INPUT: vxvv - [R,vR,vT,phi] vo - circular velocity at ro (km/s) ro - distance from vantage point to GC (kpc) zo - offset toward the NGP of the Sun wrt the plane (kpc) solarmotion - value in [-U,V,W] (km/s) OUTPUT: HISTORY: 2010-07-12 - Written - Bovy (NYU) 2014-06-11 - Added conversion kwargs to physical coordinates - Bovy (IAS) """ if len(vxvv) == 3: #pragma: no cover raise ValueError("You only provided R,vR, & vT, but not phi; you probably want planarROrbit") OrbitTop.__init__(self,vxvv=vxvv, ro=ro,zo=zo,vo=vo,solarmotion=solarmotion) return None def integrate(self,t,pot,method='symplec4_c',dt=None): """ NAME: integrate PURPOSE: integrate the orbit INPUT: t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances method= 'odeint' for scipy's odeint 'leapfrog' for a simple leapfrog implementation 'leapfrog_c' for a simple leapfrog implementation in C 'rk4_c' for a 4th-order Runge-Kutta integrator in C 'rk6_c' for a 6th-order Runge-Kutta integrator in C 'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest) dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize OUTPUT: (none) (get the actual orbit using getOrbit()) HISTORY: 2010-07-20 """ if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp') if hasattr(self,'rs'): delattr(self,'rs') thispot= RZToplanarPotential(pot) self.t= nu.array(t) self._pot= thispot self.orbit, msg= _integrateOrbit(self.vxvv,thispot,t,method,dt) return msg def integrate_dxdv(self,dxdv,t,pot,method='dopr54_c', rectIn=False,rectOut=False): """ NAME: integrate_dxdv PURPOSE: integrate the orbit and a small area of phase space INPUT: dxdv - [dR,dvR,dvT,dphi] t - list of times at which to output (0 has to be in this!) pot - potential instance or list of instances method= 'odeint' for scipy's odeint 'rk4_c' for a 4th-order Runge-Kutta integrator in C 'rk6_c' for a 6th-order Runge-Kutta integrator in C 'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest) rectIn= (False) if True, input dxdv is in rectangular coordinates rectOut= (False) if True, output dxdv (that in orbit_dxdv) is in rectangular coordinates OUTPUT: (none) (get the actual orbit using getOrbit_dxdv()) HISTORY: 2010-10-17 - Written - Bovy (IAS) 2014-06-29 - Added rectIn and rectOut - Bovy (IAS) """ if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp') if hasattr(self,'rs'): delattr(self,'rs') thispot= RZToplanarPotential(pot) self.t= nu.array(t) self._pot_dxdv= thispot self._pot= thispot self.orbit_dxdv, msg= _integrateOrbit_dxdv(self.vxvv,dxdv,thispot,t, method,rectIn,rectOut) self.orbit= self.orbit_dxdv[:,:4] return msg @physical_conversion('energy') def E(self,*args,**kwargs): """ NAME: E PURPOSE: calculate the energy INPUT: pot= potential instance or list of such instances t= time at which to evaluate E OUTPUT: energy HISTORY: 2010-09-15 - Written - Bovy (NYU) """ if not 'pot' in kwargs or kwargs['pot'] is None: try: pot= self._pot except AttributeError: raise AttributeError("Integrate orbit or specify pot=") if 'pot' in kwargs and kwargs['pot'] is None: kwargs.pop('pot') else: pot= kwargs.pop('pot') if isinstance(pot,Potential): thispot= RZToplanarPotential(pot) elif isinstance(pot,list): thispot= [] for p in pot: if isinstance(p,Potential): thispot.append(RZToplanarPotential(p)) else: thispot.append(p) else: thispot= pot if len(args) > 0: t= args[0] else: t= 0. #Get orbit thiso= self(*args,**kwargs) onet= (len(thiso.shape) == 1) if onet: return evaluateplanarPotentials(thiso[0],thispot, phi=thiso[3],t=t)\ +thiso[1]**2./2.\ +thiso[2]**2./2. else: return nu.array([evaluateplanarPotentials(thiso[0,ii],thispot, phi=thiso[3,ii], t=t[ii])\ +thiso[1,ii]**2./2.\ +thiso[2,ii]**2./2.
for ii in range(len(t))]) def e(self,analytic=False,pot=None): """ NAME: e PURPOSE: calculate the eccentricity INPUT: analytic - calculate e analytically pot - potential used to analytically calculate e OUTPUT: eccentricity HISTORY: 2010-09-15 - Written - Bovy (NYU) """ if analytic: self._setupaA(pot=pot,type='adiabatic') (rperi,rap)= self._aA.calcRapRperi(self) return (rap-rperi)/(rap+rperi) if not hasattr(self,'orbit'): raise AttributeError("Integrate the orbit first") if not hasattr(self,'rs'): self.rs= self.orbit[:,0] return (nu.amax(self.rs)-nu.amin(self.rs))/(nu.amax(self.rs)+nu.amin(self.rs)) def _integrateROrbit(vxvv,pot,t,method,dt): """ NAME: _integrateROrbit PURPOSE: integrate an orbit in a Phi(R) potential in the R-plane INPUT: vxvv - array with the initial conditions stacked like [R,vR,vT]; vR outward! pot - Potential instance t - list of times at which to output (0 has to be in this!) method - 'odeint' or 'leapfrog' dt - if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize OUTPUT: [:,3] array of [R,vR,vT] at each t HISTORY: 2010-07-20 - Written - Bovy (NYU) """ #First check that the potential has C if '_c' in method: if isinstance(pot,list): allHasC= nu.prod([p.hasC for p in pot]) else: allHasC= pot.hasC if not allHasC and ('leapfrog' in method or 'symplec' in method): method= 'leapfrog' elif not allHasC: method= 'odeint' if method.lower() == 'leapfrog': #We hack this by putting in a dummy phi this_vxvv= nu.zeros(len(vxvv)+1) this_vxvv[0:len(vxvv)]= vxvv tmp_out, msg= _integrateOrbit(this_vxvv,pot,t,method,dt) #tmp_out is (nt,4) out= tmp_out[:,0:3] elif method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \ or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \ or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c': #We hack this by putting in a dummy phi this_vxvv= nu.zeros(len(vxvv)+1) this_vxvv[0:len(vxvv)]= vxvv tmp_out, msg= _integrateOrbit(this_vxvv,pot,t,method,dt) #tmp_out is (nt,4) out= tmp_out[:,0:3] elif method.lower() == 'odeint': l= vxvv[0]*vxvv[2] l2= l**2. init= [vxvv[0],vxvv[1]] intOut= integrate.odeint(_REOM,init,t,args=(pot,l2), rtol=10.**-8.)#,mxstep=100000000) out= nu.zeros((len(t),3)) out[:,0]= intOut[:,0] out[:,1]= intOut[:,1] out[:,2]= l/out[:,0] msg= 0 #post-process to remove negative radii neg_radii= (out[:,0] < 0.) out[neg_radii,0]= -out[neg_radii,0] _parse_warnmessage(msg) return (out,msg) def _REOM(y,t,pot,l2): """ NAME: _REOM PURPOSE: implements the EOM, i.e., the right-hand side of the differential equation INPUT: y - current phase-space position t - current time pot - (list of) Potential instance(s) l2 - angular momentum squared OUTPUT: dy/dt HISTORY: 2010-07-20 - Written - Bovy (NYU) """ return [y[1], l2/y[0]**3.+evaluateplanarRforces(y[0],pot,t=t)] def _integrateOrbit(vxvv,pot,t,method,dt): """ NAME: _integrateOrbit PURPOSE: integrate an orbit in a Phi(R) potential in the (R,phi)-plane INPUT: vxvv - array with the initial conditions stacked like [R,vR,vT,phi]; vR outward! pot - Potential instance t - list of times at which to output (0 has to be in this!) 
       method - 'odeint', 'leapfrog', or one of the C integrators
                ('leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c', 'symplec6_c',
                'dopr54_c')
       dt - if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
    OUTPUT:
       [:,4] array of [R,vR,vT,phi] at each t
    HISTORY:
       2010-07-20 - Written - Bovy (NYU)
    """
    #First check that the potential has C
    if '_c' in method:
        if isinstance(pot,list):
            allHasC= nu.prod([p.hasC for p in pot])
        else:
            allHasC= pot.hasC
        if not allHasC and ('leapfrog' in method or 'symplec' in method):
            method= 'leapfrog'
        elif not allHasC:
            method= 'odeint'
    if method.lower() == 'leapfrog':
        #go to the rectangular frame
        this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]),
                             vxvv[0]*nu.sin(vxvv[3]),
                             vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]),
                             vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])])
        #integrate
        tmp_out= symplecticode.leapfrog(_rectForce,this_vxvv,
                                        t,args=(pot,),rtol=10.**-8)
        #go back to the cylindrical frame
        R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
        phi= nu.arccos(tmp_out[:,0]/R)
        phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
        vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi)
        vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi)
        out= nu.zeros((len(t),4))
        out[:,0]= R
        out[:,1]= vR
        out[:,2]= vT
        out[:,3]= phi
        msg= 0
    elif method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
            or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
            or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c':
        warnings.warn("Using C implementation to integrate orbits",galpyWarning)
        #go to the rectangular frame
        this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]),
                             vxvv[0]*nu.sin(vxvv[3]),
                             vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]),
                             vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])])
        #integrate
        tmp_out, msg= integratePlanarOrbit_c(pot,this_vxvv,
                                             t,method,dt=dt)
        #go back to the cylindrical frame
        R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
        phi= nu.arccos(tmp_out[:,0]/R)
        phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
        vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi)
        vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi)
        out= nu.zeros((len(t),4))
        out[:,0]= R
        out[:,1]= vR
        out[:,2]= vT
        out[:,3]= phi
    elif method.lower() == 'odeint':
        vphi= vxvv[2]/vxvv[0]
        init= [vxvv[0],vxvv[1],vxvv[3],vphi]
        intOut= integrate.odeint(_EOM,init,t,args=(pot,),
                                 rtol=10.**-8.)#,mxstep=100000000)
        out= nu.zeros((len(t),4))
        out[:,0]= intOut[:,0]
        out[:,1]= intOut[:,1]
        out[:,3]= intOut[:,2]
        out[:,2]= out[:,0]*intOut[:,3]
        msg= 0
    else:
        raise NotImplementedError("requested integration method does not exist")
    #post-process to remove negative radii
    neg_radii= (out[:,0] < 0.)
    out[neg_radii,0]= -out[neg_radii,0]
    out[neg_radii,3]+= m.pi
    _parse_warnmessage(msg)
    return (out,msg)

def _integrateOrbit_dxdv(vxvv,dxdv,pot,t,method,rectIn,rectOut):
    """
    NAME:
       _integrateOrbit_dxdv
    PURPOSE:
       integrate an orbit and area of phase space in a Phi(R) potential
       in the (R,phi)-plane
    INPUT:
       vxvv - array with the initial conditions stacked like
              [R,vR,vT,phi]; vR outward!
       dxdv - difference to integrate [dR,dvR,dvT,dphi]
       pot - Potential instance
       t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog' rectIn= (False) if True, input dxdv is in rectangular coordinates rectOut= (False) if True, output dxdv (that in orbit_dxdv) is in rectangular coordinates OUTPUT: [:,8] array of [R,vR,vT,phi,dR,dvR,dvT,dphi] at each t error message from integrator HISTORY: 2010-10-17 - Written - Bovy (IAS) """ #First check that the potential has C if '_c' in method: if isinstance(pot,list): allHasC= nu.prod([p.hasC and p.hasC_dxdv for p in pot]) else: allHasC= pot.hasC and pot.hasC_dxdv if not allHasC and not 'leapfrog' in method and not 'symplec' in method: method= 'odeint' warnings.warn("Using odeint because not all used potential have adequate C implementations to integrate phase-space volumes",galpyWarning) #go to the rectangular frame this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]), vxvv[0]*nu.sin(vxvv[3]), vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]), vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])]) if not rectIn: this_dxdv= nu.array([nu.cos(vxvv[3])*dxdv[0] -vxvv[0]*nu.sin(vxvv[3])*dxdv[3], nu.sin(vxvv[3])*dxdv[0] +vxvv[0]*nu.cos(vxvv[3])*dxdv[3], -(vxvv[1]*nu.sin(vxvv[3]) +vxvv[2]*nu.cos(vxvv[3]))*dxdv[3] +nu.cos(vxvv[3])*dxdv[1]-nu.sin(vxvv[3])*dxdv[2], (vxvv[1]*nu.cos(vxvv[3]) -vxvv[2]*nu.sin(vxvv[3]))*dxdv[3] +nu.sin(vxvv[3])*dxdv[1]+nu.cos(vxvv[3])*dxdv[2]]) else: this_dxdv= dxdv if 'leapfrog' in method.lower() or 'symplec' in method.lower(): raise TypeError('Symplectic integration for phase-space volume is not possible') elif method.lower() == 'rk4_c' or method.lower() == 'rk6_c' \ or method.lower() == 'dopr54_c': warnings.warn("Using C implementation to integrate orbits",galpyWarning) #integrate tmp_out, msg= integratePlanarOrbit_dxdv_c(pot,this_vxvv,this_dxdv, t,method) elif method.lower() == 'odeint': init= [this_vxvv[0],this_vxvv[1],this_vxvv[2],this_vxvv[3], this_dxdv[0],this_dxdv[1],this_dxdv[2],this_dxdv[3]] #integrate tmp_out= integrate.odeint(_EOM_dxdv,init,t,args=(pot,), rtol=10.**-8.)#,mxstep=100000000) msg= 0 else: raise NotImplementedError("requested integration method does not exist") #go back to the cylindrical frame R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.) phi= nu.arccos(tmp_out[:,0]/R) phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)] vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi) vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi) cp= nu.cos(phi) sp= nu.sin(phi) dR= cp*tmp_out[:,4]+sp*tmp_out[:,5] dphi= (cp*tmp_out[:,5]-sp*tmp_out[:,4])/R dvR= cp*tmp_out[:,6]+sp*tmp_out[:,7]+vT*dphi dvT= cp*tmp_out[:,7]-sp*tmp_out[:,6]-vR*dphi out= nu.zeros((len(t),8)) out[:,0]= R out[:,1]= vR out[:,2]= vT out[:,3]= phi if rectOut: out[:,4:]= tmp_out[:,4:] else: out[:,4]= dR out[:,7]= dphi out[:,5]= dvR out[:,6]= dvT _parse_warnmessage(msg) return (out,msg) def _EOM_dxdv(x,t,pot): """ NAME: _EOM_dxdv PURPOSE: implements the EOM, i.e., the right-hand side of the differential equation, for integrating phase space differences, rectangular INPUT: x - current phase-space position t - current time pot - (list of) Potential instance(s) OUTPUT: dy/dt HISTORY: 2011-10-18 - Written - Bovy (NYU) """ #x is rectangular so calculate R and phi R= nu.sqrt(x[0]**2.+x[1]**2.) 
    phi= nu.arccos(x[0]/R)
    sinphi= x[1]/R
    cosphi= x[0]/R
    if x[1] < 0.: phi= 2.*nu.pi-phi
    #calculate forces
    Rforce= evaluateplanarRforces(R,pot,phi=phi,t=t)
    phiforce= evaluateplanarphiforces(R,pot,phi=phi,t=t)
    R2deriv= evaluateplanarPotentials(R,pot,phi=phi,t=t,dR=2)
    phi2deriv= evaluateplanarPotentials(R,pot,phi=phi,t=t,dphi=2)
    Rphideriv= evaluateplanarPotentials(R,pot,phi=phi,t=t,dR=1,dphi=1)
    #Calculate derivatives and derivatives+time derivatives
    dFxdx= -cosphi**2.*R2deriv\
        +2.*cosphi*sinphi/R**2.*phiforce\
        +sinphi**2./R*Rforce\
        +2.*sinphi*cosphi/R*Rphideriv\
        -sinphi**2./R**2.*phi2deriv
    dFxdy= -sinphi*cosphi*R2deriv\
        +(sinphi**2.-cosphi**2.)/R**2.*phiforce\
        -cosphi*sinphi/R*Rforce\
        -(cosphi**2.-sinphi**2.)/R*Rphideriv\
        +cosphi*sinphi/R**2.*phi2deriv
    dFydx= -cosphi*sinphi*R2deriv\
        +(sinphi**2.-cosphi**2.)/R**2.*phiforce\
        +(sinphi**2.-cosphi**2.)/R*Rphideriv\
        -sinphi*cosphi/R*Rforce\
        +sinphi*cosphi/R**2.*phi2deriv
    dFydy= -sinphi**2.*R2deriv\
        -2.*sinphi*cosphi/R**2.*phiforce\
        -2.*sinphi*cosphi/R*Rphideriv\
        +cosphi**2./R*Rforce\
        -cosphi**2./R**2.*phi2deriv
    return nu.array([x[2],x[3],
                     cosphi*Rforce-1./R*sinphi*phiforce,
                     sinphi*Rforce+1./R*cosphi*phiforce,
                     x[6],x[7],
                     dFxdx*x[4]+dFxdy*x[5],
                     dFydx*x[4]+dFydy*x[5]])

def _EOM(y,t,pot):
    """
    NAME:
       _EOM
    PURPOSE:
       implements the EOM, i.e., the right-hand side of the differential
       equation; the squared angular momentum l2 is computed internally
       from the current phase-space position
    INPUT:
       y - current phase-space position
       t - current time
       pot - (list of) Potential instance(s)
    OUTPUT:
       dy/dt
    HISTORY:
       2010-07-20 - Written - Bovy (NYU)
    """
    l2= (y[0]**2.*y[3])**2.
    return [y[1],
            l2/y[0]**3.+evaluateplanarRforces(y[0],pot,phi=y[2],t=t),
            y[3],
            1./y[0]**2.*(evaluateplanarphiforces(y[0],pot,phi=y[2],t=t)-
                         2.*y[0]*y[1]*y[3])]

def _rectForce(x,pot,t=0.):
    """
    NAME:
       _rectForce
    PURPOSE:
       returns the force in the rectangular frame
    INPUT:
       x - current position
       t - current time
       pot - (list of) Potential instance(s)
    OUTPUT:
       force
    HISTORY:
       2011-02-02 - Written - Bovy (NYU)
    """
    #x is rectangular so calculate R and phi
    R= nu.sqrt(x[0]**2.+x[1]**2.)
    phi= nu.arccos(x[0]/R)
    sinphi= x[1]/R
    cosphi= x[0]/R
    if x[1] < 0.: phi= 2.*nu.pi-phi
    #calculate forces
    Rforce= evaluateplanarRforces(R,pot,phi=phi,t=t)
    phiforce= evaluateplanarphiforces(R,pot,phi=phi,t=t)
    return nu.array([cosphi*Rforce-1./R*sinphi*phiforce,
                     sinphi*Rforce+1./R*cosphi*phiforce])

def _parse_warnmessage(msg):
    if msg == 1: #pragma: no cover
        warnings.warn("During numerical integration, steps smaller than the smallest step were requested; integration might not be accurate",galpyWarning)
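
# ---------------------------------------------------------------------------
# Illustrative sketch (added commentary; not part of galpy): the 'odeint'
# branches above integrate the planar equations of motion
#    dR/dt = vR ,   dvR/dt = L^2/R^3 + F_R(R) ,
# with the angular momentum L = R*vT conserved, so that vT can be recovered
# afterwards as L/R. The self-contained example below reproduces that scheme
# with scipy for a Kepler potential Phi(R)= -1/R (hence F_R= -1/R^2); the
# names _demo_kepler_Rforce and _demo_eom are local to this sketch and do not
# exist in galpy.
if __name__ == '__main__':
    import numpy
    from scipy import integrate as _demo_integrate

    def _demo_kepler_Rforce(R):
        #Radial force of a point mass, Phi(R)= -1/R in natural units
        return -1./R**2.

    def _demo_eom(y,t,l2):
        #y= [R,vR]; same right-hand side as _REOM above
        return [y[1],l2/y[0]**3.+_demo_kepler_Rforce(y[0])]

    R0, vR0, vT0= 1., 0.1, 1. #initial conditions; vR outward
    l2= (R0*vT0)**2. #squared (conserved) angular momentum
    ts= numpy.linspace(0.,30.,1001)
    sol= _demo_integrate.odeint(_demo_eom,[R0,vR0],ts,args=(l2,),
                                rtol=10.**-8.)
    R= sol[:,0]
    vT= numpy.sqrt(l2)/R #vT recovered from angular-momentum conservation
    ecc= (numpy.amax(R)-numpy.amin(R))/(numpy.amax(R)+numpy.amin(R))
    print("eccentricity estimated as in e() above: %f" % ecc)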
followthesheep/galpy
galpy/orbit_src/planarOrbit.py
Python
bsd-3-clause
30,816
[ "Galaxy" ]
14cd7e0e7df49a2764ba2d77952d137c6450c2ca72570907eeb49180205c2295
#!/usr/bin/env python ################################################## ## DEPENDENCIES import sys import os import os.path try: import builtins as builtin except ImportError: import __builtin__ as builtin from os.path import getmtime, exists import time import types from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple from Cheetah.Template import Template from Cheetah.DummyTransaction import * from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList from Cheetah.CacheRegion import CacheRegion import Cheetah.Filters as Filters import Cheetah.ErrorCatchers as ErrorCatchers ################################################## ## MODULE CONSTANTS VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time __CHEETAH_version__ = '2.4.4' __CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0) __CHEETAH_genTime__ = 1406885498.4668 __CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014' __CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/movielist.tmpl' __CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014' __CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine' if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %s. Templates compiled before version %s must be recompiled.'%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES class movielist(Template): ################################################## ## CHEETAH GENERATED METHODS def __init__(self, *args, **KWs): super(movielist, self).__init__(*args, **KWs) if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body _orig_filter_90714292 = _filter filterName = u'WebSafe' if self._CHEETAH__filters.has_key("WebSafe"): _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName] else: _filter = self._CHEETAH__currentFilter = \ self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter write(u'''<?xml version="1.0" encoding="UTF-8"?> <e2movielist> ''') for movie in VFFSL(SL,"movies",True): # generated from line 4, col 2 write(u'''\t\t<e2movie> \t\t\t<e2servicereference>''') _v = VFFSL(SL,"movie.fullname",True) # u'$movie.fullname' on line 6, col 24 if _v is not None: write(_filter(_v, rawExpr=u'$movie.fullname')) # from line 6, col 24. 
write(u'''</e2servicereference> \t\t\t<e2title>''') _v = VFFSL(SL,"movie.eventname",True) # u'$movie.eventname' on line 7, col 13 if _v is not None: write(_filter(_v, rawExpr=u'$movie.eventname')) # from line 7, col 13. write(u'''</e2title> \t\t\t<e2description>''') _v = VFFSL(SL,"movie.description",True) # u'$movie.description' on line 8, col 19 if _v is not None: write(_filter(_v, rawExpr=u'$movie.description')) # from line 8, col 19. write(u'''</e2description> \t\t\t<e2descriptionextended>''') _v = VFFSL(SL,"movie.descriptionExtended",True) # u'$movie.descriptionExtended' on line 9, col 27 if _v is not None: write(_filter(_v, rawExpr=u'$movie.descriptionExtended')) # from line 9, col 27. write(u'''</e2descriptionextended> \t\t\t<e2servicename>''') _v = VFFSL(SL,"movie.servicename",True) # u'$movie.servicename' on line 10, col 19 if _v is not None: write(_filter(_v, rawExpr=u'$movie.servicename')) # from line 10, col 19. write(u'''</e2servicename> \t\t\t<e2time>''') _v = VFFSL(SL,"movie.recordingtime",True) # u'$movie.recordingtime' on line 11, col 12 if _v is not None: write(_filter(_v, rawExpr=u'$movie.recordingtime')) # from line 11, col 12. write(u'''</e2time> \t\t\t<e2length>''') _v = VFFSL(SL,"movie.length",True) # u'$movie.length' on line 12, col 14 if _v is not None: write(_filter(_v, rawExpr=u'$movie.length')) # from line 12, col 14. write(u'''</e2length> \t\t\t<e2tags>''') _v = VFFSL(SL,"movie.tags",True) # u'$movie.tags' on line 13, col 12 if _v is not None: write(_filter(_v, rawExpr=u'$movie.tags')) # from line 13, col 12. write(u'''</e2tags> \t\t\t<e2filename>''') _v = VFFSL(SL,"movie.filename",True) # u'$movie.filename' on line 14, col 16 if _v is not None: write(_filter(_v, rawExpr=u'$movie.filename')) # from line 14, col 16. write(u'''</e2filename> \t\t\t<e2filesize>''') _v = VFFSL(SL,"movie.filesize",True) # u'$movie.filesize' on line 15, col 16 if _v is not None: write(_filter(_v, rawExpr=u'$movie.filesize')) # from line 15, col 16. write(u'''</e2filesize> \t\t</e2movie> ''') write(u'''</e2movielist> ''') _filter = self._CHEETAH__currentFilter = _orig_filter_90714292 ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES _CHEETAH__instanceInitialized = False _CHEETAH_version = __CHEETAH_version__ _CHEETAH_versionTuple = __CHEETAH_versionTuple__ _CHEETAH_genTime = __CHEETAH_genTime__ _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__ _CHEETAH_src = __CHEETAH_src__ _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__ _mainCheetahMethod_for_movielist= 'respond' ## END CLASS DEFINITION if not hasattr(movielist, '_initCheetahAttributes'): templateAPIClass = getattr(movielist, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(movielist) # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=movielist()).run()
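
# ---------------------------------------------------------------------------
# Illustrative sketch (added commentary; not generated by Cheetah): how a
# caller would typically render this compiled template. The _DemoMovie class
# and its field values below are hypothetical stand-ins for the movie records
# that the real OpenWebif controller gathers from the Enigma2 movie database.
def _example_render():
    class _DemoMovie(object):
        # Cheetah's NameMapper resolves the $movie.<field> lookups in
        # respond() against these attributes.
        fullname = '1:0:0:0:0:0:0:0:0:0:/hdd/movie/demo.ts'
        eventname = 'Demo Recording'
        description = 'Short description'
        descriptionExtended = 'Extended description'
        servicename = 'Demo Channel'
        recordingtime = 1406885498
        length = '1:30'
        tags = ''
        filename = '/hdd/movie/demo.ts'
        filesize = 734003200
    # searchList supplies the $movies placeholder used in respond() above;
    # with no transaction, respond() returns the rendered <e2movielist> XML.
    return movielist(searchList=[{'movies': [_DemoMovie()]}]).respond()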
MOA-2011/enigma2-plugin-extensions-openwebif
plugin/controllers/views/web/movielist.py
Python
gpl-2.0
7,514
[ "VisIt" ]
2aadf6e9271638a50224e5377dd7e6b3b28646779a479ef6ef7f6b564a34ccce
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python2, python3
"""Light-weight library for constructing model layers for architecture searches.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import collections
import math

import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf

from tunas import custom_layers
from tunas import depthwise_initializers


def _get_tiny_float(dtype):
  if dtype == tf.bfloat16:
    # Numpy doesn't tell us what the smallest possible value of a bfloat16 is,
    # so we use a hard-coded value appropriate for bfloat16.
    return tf.constant(2e-38, tf.bfloat16)
  else:
    return np.finfo(dtype.as_numpy_dtype).tiny


def _compute_explicit_padding(kernel_size, dilation_rate):
  """Compute the necessary padding based on kernel size and dilation rate."""
  if isinstance(kernel_size, int):
    kernel_size = [kernel_size, kernel_size]
  if isinstance(dilation_rate, int):
    dilation_rate = [dilation_rate, dilation_rate]
  kernel_size_effective = [
      kernel_size[0] + (kernel_size[0] - 1) * (dilation_rate[0] - 1),
      kernel_size[1] + (kernel_size[1] - 1) * (dilation_rate[1] - 1)
  ]
  pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
  pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
  pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
  return [[0, 0],
          [pad_beg[0], pad_end[0]],
          [pad_beg[1], pad_end[1]],
          [0, 0]]


def with_data_dependencies(dependencies, output_tensors):
  """Add data dependencies that can't be optimized away by XLA.

  In certain cases, we may wish to force one TensorFlow op to run before
  another. In pure TensorFlow, we'd usually do this with control dependencies.
  But XLA can ignore control dependencies in certain cases. We instead create
  a fake data dependency, which XLA can't ignore.

  Based on the implementation of the recompute_grad decorator by rsepassi@.

  Args:
    dependencies: List of tensors which must be evaluated before any element
        of output_tensors can be evaluated.
    output_tensors: List of output tensors.

  Returns:
    A list of tensors with the same shapes and types as `output_tensors`.
  """
  # Compute a data dependency.
  data_dependencies = []
  for dependency in dependencies:
    # Extract the scalar value dependency[0, 0, ..., 0] and append it to
    # `data_dependencies`.
    begin = tf.zeros([dependency.shape.ndims], tf.int32)
    size = tf.ones([dependency.shape.ndims], tf.int32)
    data_dependency = tf.reshape(tf.slice(dependency, begin, size), [])
    data_dependencies.append(tf.cast(data_dependency, dependencies[0].dtype))
  sum_dependency = tf.stop_gradient(tf.add_n(data_dependencies))

  # Apply it to each tensor in `output_tensors`.
results = [] for tensor in output_tensors: tiny_float = _get_tiny_float(tensor.dtype) last_dep = tiny_float * tf.cast(sum_dependency, tensor.dtype) results.append(tensor + last_dep) return results def _mask_regularizer(regularizer, mask): """Multiply a variable regularizer's value by a binary (0-1) mask.""" def compute_masked_loss(value): loss = regularizer(value) if loss is None: return None if loss.shape.rank != 0: raise ValueError('loss must be scalar: {}'.format(loss)) if mask.shape.rank != 0: raise ValueError('mask must be scalar: {}'.format(mask)) return loss * tf.cast(mask, loss.dtype) return compute_masked_loss def _maximum_regularizer(regularizer1, regularizer2): """Take the maximum of two variable regularizers.""" def compute_loss(value): loss1 = regularizer1(value) loss2 = regularizer2(value) if loss1 is None: return loss2 elif loss2 is None: return loss1 else: return tf.maximum(loss1, loss2) return compute_loss class Layer(six.with_metaclass(abc.ABCMeta, object)): """Abstract base class representing a neural network layer.""" def __init__(self): self._trainable_variables = [] self._trainable_tensors = collections.OrderedDict() self._var_regularizers = collections.OrderedDict() self._moving_average_variables = collections.OrderedDict() self._tracked_layers = [] self._updates = [] def _create_trainable_variable(self, name, shape=None, dtype=None, initializer=None, regularizer=None): """Protected helper function to create a new trainable variable.""" if name in self._trainable_tensors: raise ValueError('Variable with name {!r} already exists'.format(name)) variable = tf.get_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, trainable=True) if regularizer is not None: self._var_regularizers[variable] = regularizer self._trainable_variables.append(variable) # NOTE(b/123532966): We store both a list of trainable variables and a # dictionary containing Tensor snapshots of their values. In most cases, # we work with the snapshots rather than handling the variables directly. # This is needed to prevent TensorFlow from generating invalid graphs in # the body of Switch.apply(). # # We can end up generating invalid TensorFlow graphs if we try to mix # conditional control flow (tf.cond), custom gradients, and TensorFlow # variables. The apply() function of a Switch layer makes use of tf.cond # and custom gradients. The call to variable.read_value() here basically # ensures that the function doesn't have to deal with variables directly. # At the beginning of each training step, we call read_value() on each # trainable variable to obtain a Tensor snapshot of its most recent value. # For the rest of the training step, work with these Tensor snapshots # instead of trying to manipulate the variables directly. self._trainable_tensors[name] = variable.read_value() def _get_trainable_tensor(self, name): """Protected helper function to look up a trainable variable by name.""" return self._trainable_tensors[name] def _create_moving_average_variable(self, name, shape, initializer, dtype=tf.float32): self._moving_average_variables[name] = tf.get_variable( name=name, shape=shape, initializer=initializer, dtype=dtype, collections=[ tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES ], trainable=False) def _get_moving_average_variable(self, name): return self._moving_average_variables[name] def _update_moving_average_variable(self, name, value, momentum): """Protected helper method to update a moving average variable. WARNING: This method comes with a few caveats: 1. 
It should not be used inside Switch layers or other layers that rely on conditional control flow. 2. If using with rematerialization, the same variable may receive multiple updates. Args: name: String, name of the variable to create. value: Tensor, value for the moving average update. momentum: Float between 0 and 1, the momentum to use for the moving average update. """ var = self._moving_average_variables[name] update_op = tf.assign_sub(var, (var - value) * (1 - momentum)) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op) self._updates.append(update_op) def _track_layer(self, layer): """Protected helper function to mark `layer` as a child of the callee.""" self._tracked_layers.append(layer) def trainable_tensors(self): """Return a list of tensors corresponding to trainable variables.""" # This logic was originally added to work around a TF bug which caused some # bad interactions between tf.cond() statements, custom gradients, and # TensorFlow variables. However, the root cause of the problem involved # implementation details of COND_V1. We're not sure if it's still necessary, # since we've updated the code to use COND_V2 instead. # Ensure that no tensor is added more than once, and that tensors are added # in a predictable order. Ideally, we'd use an OrderedSet to keep track of # all the tensors, but Python doesn't have one, so we use an OrderedDict # instead. result_dict = collections.OrderedDict() for tensor in self._trainable_tensors.values(): result_dict[tensor] = None for layer in self._tracked_layers: for tensor in layer.trainable_tensors(): result_dict[tensor] = None return list(result_dict.keys()) def trainable_variables(self): """Returns a list of trainable variables for the layer and its children. WARNING: This function is intended for use outside of model construction, such as when calling Optimizer.minimize(var_list=model.trainable_variables()) You should use the trainable_tensors() method instead during model construction, especially within the apply() method. Returns: A list of tf.Variable objects. """ # Use an OrderedDict to deduplicate the list of variables while ensuring # that they're returned in a predictable order. result_dict = collections.OrderedDict() for variable in self._trainable_variables: result_dict[variable] = None for layer in self._tracked_layers: for variable in layer.trainable_variables(): result_dict[variable] = None return list(result_dict.keys()) def _get_all_variable_regularizers(self): """Returns a list of (variable, regularizer) pairs.""" # Deduplicate the list of regularizers so that no variable is regularized # more than once, even if it's used in multiple layers. result = collections.OrderedDict() result.update(self._var_regularizers) for layer in self._tracked_layers: for var, regularizer in layer._get_all_variable_regularizers().items(): # pylint:disable=protected-access if var in result: # The same variable can be used by more than one child of the current # layer. The regularizer might be masked out (i.e., multiplied by # zero) in some but not all of the children. We mask out a variable's # regularizer only when it is masked out by *every* child. See the # implementation of the Switch class for details. result[var] = _maximum_regularizer(result[var], regularizer) else: result[var] = regularizer return result def regularization_loss(self): """Compute the total regularization loss for a layer and its children. Returns: A scalar float Tensor. 
""" losses = [] for var, regularizer in self._get_all_variable_regularizers().items(): current_loss = regularizer(var) if current_loss is not None: losses.append(current_loss) if losses: return tf.add_n(losses) else: return tf.zeros(shape=(), dtype=tf.float32) def updates(self): """Get a list of update ops to apply for the current training step. Returns: A list of TensorFlow Operations. """ result = list(self._updates) for layer in self._tracked_layers: result.extend(layer.updates()) return result @abc.abstractmethod def build(self, input_shape): """Create any layer-specific variables and compute the output shape. Args: input_shape: tf.Shape, shape of the input tensor for this layer. Returns: tf.Shape of the output tensor returned by this layer. """ pass @abc.abstractmethod def apply(self, inputs, training): """Apply the current layer to the specified input tensor. Args: inputs: Tensor of input values. training: Boolean. True during model training, false during inference and evaluation. Returns: Tensor of output values. """ pass class Identity(Layer): """Network layer corresponding to an identity function.""" def build(self, input_shape): return input_shape def apply(self, inputs, training): del training return tf.identity(inputs) class Zeros(Layer): """Network layer that returns an all-zeros tensor. If output shape is not specified, return an all-zeros tensor with the same shape as the input tensor. Note the batch dimension of the output will be adjusted according to the input. """ def __init__(self, output_shape=None): super(Zeros, self).__init__() self._output_shape = output_shape def build(self, input_shape): if self._output_shape: # Adjust batch dimension of the output shape based on the input shape. batch_dim = input_shape[0] remaining_dims = [None] * (self._output_shape.rank - 1) return self._output_shape.merge_with([batch_dim] + remaining_dims) return input_shape def apply(self, inputs, training): del training if self._output_shape: # Batch dimension is allowed to vary based on inputs. Useful for cases # where self.apply is called twice with different batch sizes. batch_dim = tf.shape(inputs)[0] output_shape = tf.stack([batch_dim] + self._output_shape.as_list()[1:]) self._output_shape[:1].assert_is_compatible_with(inputs.shape[:1]) return tf.zeros(shape=output_shape, dtype=inputs.dtype) return tf.zeros_like(inputs) class ReLU(Layer): """Network layer corresponding to a ReLU activation function.""" def build(self, input_shape): return input_shape def apply(self, inputs, training): del training return tf.nn.relu(inputs) class ReLU6(Layer): """Network layer corresponding to a ReLU6 activation function.""" def build(self, input_shape): return input_shape def apply(self, inputs, training): del training return tf.nn.relu6(inputs) class Sigmoid(Layer): """Network layer corresponding to a sigmoid activation function.""" def build(self, input_shape): return input_shape def apply(self, inputs, training): del training return tf.nn.sigmoid(inputs) class Swish(Layer): """Network layer corresponding to a SiLU/Swish activation function. References: Hendrycks and Gimpel. "Gaussian Error Linear Units (GELUs)." https://arxiv.org/pdf/1606.08415.pdf Elfwing, Uchibe, and Doya. "Sigmoid-weighted linear units for neural network function approximation in reinforcement learning." Neural Networks, 107:3-11, 2018 Ramachandran, Zoph, and Le. "Searching for Activation Functions." 
https://arxiv.org/abs/1710.05941 """ def build(self, input_shape): return input_shape def apply(self, inputs, training): del training return tf.nn.swish(inputs) class Swish6(Layer): """Network layer corresponding to a Swish6/H-Swish activation. Swish6 is a modified variation of the SiLU/Swish activation function proposed in the MobileNet V3 paper. Reference: Section 5.2 of Howard et al. "Searching for MobileNet V3." https://arxiv.org/pdf/1905.02244.pdf """ def build(self, input_shape): return input_shape def apply(self, inputs, training): del training with tf.name_scope('Swish6'): return inputs * tf.nn.relu6(inputs + np.float32(3)) * np.float32(1. / 6.) class ELU(Layer): """Network layer corresponding to an ELU activation function. Reference: Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) https://arxiv.org/pdf/1511.07289.pdf """ def build(self, input_shape): return input_shape def apply(self, inputs, training): del training return tf.nn.elu(inputs) class SpaceToDepth(Layer): """Network layer corresponding to a space to depth function.""" def __init__(self, block_size): super(SpaceToDepth, self).__init__() self._block_size = block_size self._built = False def build(self, input_shape): assert len(input_shape) == 4, input_shape height_dim = tf.compat.dimension_value(input_shape[1]) if height_dim is not None: if height_dim % self._block_size != 0: raise ValueError('Image height {} must be a multiple of {}'.format( height_dim, self._block_size)) height_dim //= self._block_size width_dim = tf.compat.dimension_value(input_shape[2]) if width_dim is not None: if width_dim % self._block_size != 0: raise ValueError('Image width {} must be a multiple of {}'.format( width_dim, self._block_size)) width_dim //= self._block_size channel_dim = tf.compat.dimension_value(input_shape[3]) if channel_dim is not None: channel_dim *= pow(self._block_size, 2) output_shape = [input_shape[0], height_dim, width_dim, channel_dim] self._built = True return tf.TensorShape(output_shape) def apply(self, inputs, training): del training assert self._built return tf.nn.space_to_depth(inputs, self._block_size) class DepthPadding(Layer): """Network layer corresponding to a depth padding function.""" def __init__(self, filters): super(DepthPadding, self).__init__() self._filters = filters self._built = False def build(self, input_shape): assert len(input_shape) == 4, input_shape if int(input_shape[3]) > self._filters: raise ValueError('Output filters is smaller than input filters.') output_shape = input_shape.as_list() output_shape[3] = self._filters self._built = True return tf.TensorShape(output_shape) def apply(self, inputs, training): del training assert len(inputs.shape) == 4, inputs assert self._built input_filters = int(inputs.shape[3]) if input_filters > self._filters: raise ValueError('Output filters is smaller than input filters.') elif input_filters == self._filters: return inputs else: # input_filters < self._filters filters_padding = self._filters - tf.shape(inputs)[3] return tf.pad(inputs, [[0, 0], [0, 0], [0, 0], [0, filters_padding]]) def _get_pool_output_shape(input_shape, strides): """Get output shape for pooling ops with the 'SAME' padding scheme.""" filter_dim = int(input_shape[3]) return get_conv_output_shape(input_shape, strides, filter_dim) class SpatialMasking(Layer): """Network layer that masks the input tensor along spatial dimensions.""" def __init__(self, mask, name=None): super(SpatialMasking, self).__init__() assert len(mask.shape) == 2, mask self._mask = mask self._name 
= name self._built = False def build(self, input_shape): assert len(input_shape) == 4, input_shape with tf.name_scope(self._name, 'SpatialMasking') as scope: self._scope = scope self._spatial_mask = tf.expand_dims(tf.expand_dims(self._mask, 0), -1) self._built = True return input_shape def apply(self, inputs, training): del training assert self._built with tf.name_scope(self._scope): return inputs * tf.cast( tf.stop_gradient(self._spatial_mask), inputs.dtype) class MaxPool(Layer): """Network layer corresponding to a max pooling function.""" def __init__(self, kernel_size, strides, use_explicit_padding=False): super(MaxPool, self).__init__() self._kernel_size = kernel_size self._strides = (strides, strides) if isinstance(strides, int) else strides self._built = False self._use_explicit_padding = use_explicit_padding def build(self, input_shape): self._built = True assert len(input_shape) == 4, input_shape return _get_pool_output_shape(input_shape, self._strides) def apply(self, inputs, training): del training assert self._built padding = 'SAME' if self._use_explicit_padding: padding = 'VALID' inputs = tf.pad( tensor=inputs, paddings=_compute_explicit_padding(self._kernel_size, (1, 1))) return tf.nn.max_pool( inputs, self._kernel_size, self._strides, padding=padding) class AveragePool(Layer): """Network layer corresponding to an average pooling function.""" def __init__(self, kernel_size, strides): super(AveragePool, self).__init__() self._kernel_size = kernel_size self._strides = (strides, strides) if isinstance(strides, int) else strides self._built = False def build(self, input_shape): assert len(input_shape) == 4, input_shape self._built = True return _get_pool_output_shape(input_shape, self._strides) def apply(self, inputs, training): del training assert self._built return tf.nn.avg_pool( inputs, self._kernel_size, self._strides, padding='SAME') class GlobalAveragePool(Layer): """Network layer corresponding to a global average pooling function.""" def __init__(self, keepdims=False): super(GlobalAveragePool, self).__init__() self._keepdims = keepdims def build(self, input_shape): assert len(input_shape) == 4, input_shape if self._keepdims: return tf.TensorShape([input_shape[0], 1, 1, input_shape[3]]) else: return tf.TensorShape([input_shape[0], input_shape[3]]) def apply(self, inputs, training): del training height = tf.compat.dimension_value(inputs.shape[1]) width = tf.compat.dimension_value(inputs.shape[2]) if not height or not width: # Use tf.reduce_mean() instead of tf.nn.avg_pool() because average-pooling # ops with unknown kernel sizes are not supported by TensorFlow. return tf.reduce_mean(inputs, axis=[1, 2], keepdims=self._keepdims) if height == 1 and width == 1: if self._keepdims: return tf.identity(inputs) else: return tf.squeeze(inputs, axis=[1, 2]) # We try to use tf.nn.avg_pool() rather than tf.reduce_mean() wherever # possible, since tf.reduce_mean() is incompatible with certain mobile GPUs. result = tf.nn.avg_pool( inputs, [height, width], strides=1, padding='VALID') if not self._keepdims: result = tf.squeeze(result, axis=[1, 2]) return result class Dropout(Layer): """Network layer that implements dropout. Each element of the input is kept or dropped independently. """ def __init__(self, rate=0.5): """Class initializer. Args: rate: Float or scalar float tensor between 0 and 1. The fraction of input units to drop. 
""" super(Dropout, self).__init__() self._rate = rate def build(self, input_shape): return input_shape def apply(self, inputs, training): if training: return tf.nn.dropout(inputs, rate=self._rate) else: return tf.identity(inputs) class MultiplyByConstant(Layer): """Multiply the input by a non-trainable mask. Active only during training.""" def __init__(self, scale, name=None): super(MultiplyByConstant, self).__init__() self._scale = scale self._name = name self._scope = None self._built = False def build(self, input_shape): with tf.name_scope(self._name, 'MultiplyByConstant') as scope: self._scope = scope self._scale = tf.convert_to_tensor(self._scale) self._built = True return merge_shapes_with_broadcast(input_shape, self._scale.shape) def apply(self, inputs, training): del training # Unused assert self._built with tf.name_scope(self._scope): scale = tf.stop_gradient(tf.cast(self._scale, inputs.dtype)) return scale * inputs def _cond_v2(pred, true_fn, false_fn): """Hack to access tf.cond_v2(), which isn't part of TF's public interface. NOTE: This function will have the side effect of enabling tf.cond_v2() and while_v2 within true_fn and false_fn. Args: pred: Logical predicate, bool or scalar tf.bool Tensor. true_fn: Function to evaluate if pred is true. false_fn: Function to evaluate if pred is false. Returns: The output of true_fn or false_fn. """ is_cond_v2_enabled = tf.control_flow_v2_enabled() if not is_cond_v2_enabled: tf.enable_control_flow_v2() result = tf.cond(pred, true_fn, false_fn) if not is_cond_v2_enabled: tf.disable_control_flow_v2() return result def _make_cond(condition, if_true_fn, if_true_inputs, if_false): """Add a tf.cond() statement to the model.""" # NOTE: The code below is equivalent to # # return tf.cond( # condition, # lambda: if_true_fn(*if_true_inputs), # lambda: if_false) # # However, in an early version of the code, we were able to improve model # training throughput by 20-30% in test runs by flipping the `if` and `else` # branches. This should be fixed in the latest version of TensorFlow, but we # haven't tested it yet. # # We use cond_v2() instead of tf.cond(). Although the two operations are (in # theory) equivalent, cond_v2() typically works better on TPUs. 
return _cond_v2( tf.logical_not(condition), lambda: if_false, lambda: if_true_fn(*if_true_inputs)) class Sequential(Layer): """Sequence of layers, where the output of one is the input to the next.""" def __init__(self, layers, aux_outputs=None, name=None): super(Sequential, self).__init__() self._layers = layers self._name = name self._built = False for layer in self._layers: self._track_layer(layer) if aux_outputs is not None: self._aux_output_indices = [] for layer in aux_outputs: try: self._aux_output_indices.append(layers.index(layer)) except ValueError: # Raise a new ValueError with a more informative error message raise ValueError( 'element of aux_outputs does not appear in layers: {}'.format( layer)) else: self._aux_output_indices = None def build(self, input_shape): with tf.variable_scope(self._name, 'Sequential') as scope: self._scope = scope shape = input_shape for layer in self._layers: shape = layer.build(shape) self._built = True return shape def apply(self, inputs, training): assert self._built with tf.variable_scope(self._scope, reuse=True): value = inputs intermediate_values = [] for layer in self._layers: value = layer.apply(value, training) intermediate_values.append(value) if self._aux_output_indices is not None: aux_output_values = [ intermediate_values[i] for i in self._aux_output_indices ] return value, aux_output_values else: return value def merge_shapes_with_broadcast(shape1, shape2): """Compute the output shape for a binary op that supports broadcasting.""" shape1 = tf.TensorShape(shape1) shape2 = tf.TensorShape(shape2) # Handle the case where shape1 or shape2 contains no information. if not shape1: return shape2 if not shape2: return shape1 # Handle the case where one of the inputs is a scalar. if shape1.rank == 0: return shape2 if shape2.rank == 0: return shape1 # Make sure both shapes have the same rank. if shape1.rank != shape2.rank: raise ValueError('Tensor shapes must have the same rank: {} and {}'.format( shape1, shape2)) # Make sure each dimension is either equal or supports broadcasting. 
output_dims = [] for dim1, dim2 in zip(shape1.as_list(), shape2.as_list()): if dim1 is None: output_dims.append(dim2) elif dim2 is None: output_dims.append(dim1) elif dim1 == 1: output_dims.append(dim2) elif dim2 == 1: output_dims.append(dim1) elif dim1 == dim2: output_dims.append(dim1) else: # dim1 != dim2 raise ValueError('Tensor shapes are not compatible: {} vs {}'.format( shape1, shape2)) return tf.TensorShape(output_dims) class _ParallelAggregation(Layer): """Apply several layers to the same input, and combine their results.""" def __init__(self, branches, name=None): super(_ParallelAggregation, self).__init__() self._branches = branches self._name = name self._built = False for layer in branches: self._track_layer(layer) def build(self, input_shape): with tf.variable_scope(self._name, self.__class__.__name__) as scope: self._scope = scope output_shape = tf.TensorShape(None) for branch in self._branches: branch_shape = branch.build(input_shape) output_shape = merge_shapes_with_broadcast(output_shape, branch_shape) self._built = True return output_shape def apply(self, inputs, training): assert self._built with tf.variable_scope(self._scope, reuse=True): return self._reduce([ branch.apply(inputs, training) for branch in self._branches ]) @abc.abstractmethod def _reduce(self, tensors): pass class ParallelSum(_ParallelAggregation): """Apply several layers to the same input, and sum their results.""" def _reduce(self, tensors): result = tensors[0] for tensor in tensors[1:]: result = result + tensor return result class ParallelProduct(_ParallelAggregation): """Apply several layers to the same input, and multiply their results.""" def _reduce(self, tensors): result = tensors[0] for tensor in tensors[1:]: result = result * tensor return result class Switch(Layer): """Take a weighted combination of N possible options. Options whose weights are zero will be optimized away. """ def __init__(self, mask, options, name=None): """Class initializer. Args: mask: A float Tensor of shape [len(options)]. options: List of Layer instances. name: Optional string, name for the current layer. """ super(Switch, self).__init__() self._mask = mask self._options = options self._name = name self._built = False for layer in options: self._track_layer(layer) def build(self, input_shape): with tf.variable_scope(self._name, 'Switch') as scope: self._scope = scope output_shape = tf.TensorShape(None) for branch in self._options: branch_shape = branch.build(input_shape) output_shape = output_shape.merge_with(branch_shape) self._output_shape = output_shape self._built = True return output_shape def apply(self, inputs, training): assert self._built def apply_branch_fn(branch, weight, bias): # The first element of `all_inputs` is always equal to `inputs`. def fn(*all_inputs): result = branch.apply(all_inputs[0], training) return result * tf.cast(weight, result.dtype) + bias return fn @tf.custom_gradient def impl(*all_inputs): """Returns the output tensor and a function that computes its gradient.""" # Select which branch to take based on a discrete (integer-valued) tensor. mask = tf.stop_gradient(self._mask) mask.shape.assert_is_compatible_with([len(self._options)]) # During the apply pass, we evaluate one branch and throw away all the # intermediate outputs. with tf.variable_scope(self._scope, reuse=True): # Compute the output shape. The dimensions will generally be the same as # those returned by self.build(), but the batch size can be different. 
batch_size = tf.shape(all_inputs[0])[0] output_shape = tf.stack([batch_size] + self._output_shape.as_list()[1:]) # Forward pass output = tf.zeros(output_shape, dtype=all_inputs[0].dtype) for i, branch in enumerate(self._options): output = _make_cond( tf.not_equal(mask[i], 0), # If mask[i] != 0 then apply the current branch to `all_inputs`, # and add the result to `output`. apply_branch_fn(branch, mask[i], output), all_inputs, # Otherwise, leave the output unchanged output) def grad_fn(*output_grads): """Compute gradients for the switch statement.""" def update_grads_fn(grads, branch, weight): """Returns a function that adds gradients for `branch` to `grads`.""" def fn(*all_inputs): """Rematerializes `branch` and adds its gradients to `grads`.""" rematerialized_output = branch.apply(all_inputs[0], training) rematerialized_output *= tf.cast( weight, rematerialized_output.dtype) # Replace any `None` gradients with zeros. The gradients computed # here are returned in the `else` clause of a tf.cond() statement # later in the code, and trying to return a `None` value inside a # tf.cond statement would trigger an exception. grad_updates = tf.gradients( rematerialized_output, all_inputs, output_grads) sum_grads = [] for i in range(len(grad_updates)): if grad_updates[i] is not None: sum_grads.append(grads[i] + grad_updates[i]) else: sum_grads.append(grads[i]) return sum_grads return fn with tf.variable_scope(self._scope, reuse=True): grads = [tf.zeros_like(x) for x in all_inputs] for i, branch in enumerate(self._options): grads = _make_cond( tf.not_equal(mask[i], 0), # If mask[i] != 0 then take gradients w.r.t. current branch update_grads_fn(grads, branch, mask[i]), all_inputs, # Otherwise, leave the gradients unchanged grads) return grads return output, grad_fn # Main logic. all_inputs = [inputs] + self.trainable_tensors() return impl(*all_inputs) def _get_all_variable_regularizers(self): # Override the parent class's implementation to only regularizer variables # associated with the branch that's used for the current batch of training # examples. result = collections.OrderedDict() for i, option in enumerate(self._options): for var, regularizer in option._get_all_variable_regularizers().items(): # pylint:disable=protected-access # If a variable is not used by the selected `option` then we mask out # its regularizer (i.e., we multiply the regularizer by 0). var_regularizer = _mask_regularizer(regularizer, self._mask[i]) if var in result: # The same variable can be used by more than one of `self._options`. # For example, if both `self._options[0]` and `self._options[1]` make # use of the variable `var` with regularizer `reg` then the masked # regularizers for the two options will be # `reg(var) * tf.equal(self._selection, 0)` and # `reg(var) * tf.equal(self._selection, 1)` # respectively. By taking the maximum, we'll regularize the variable # if `0 <= self._selection <= 1` but not if `self._selection > 1`. result[var] = _maximum_regularizer(result[var], var_regularizer) else: result[var] = var_regularizer return result def _is_array_one_hot(array): """Returns true if `array` is one-hot.""" assert len(array.shape) == 1, array argmax = np.argmax(array) # One element of the array should have a value of 1 if array[argmax] != 1: return False # Each remaining element should have a value of 0 if np.count_nonzero(array) != 1: return False return True def maybe_switch_v2(mask, options, name=None): """Apply a Switch layer, optimizing it away if possible. Args: mask: A one-hot Tensor. 
options: A list of Layer objects. name: Optional string. Returns: A Layer object. """ if mask is None: if len(options) != 1: raise ValueError( 'Mask cannot be None unless len(options) == 1, but options = {}' .format(options)) return Sequential([options[0]], name=name) elif mask.shape == tf.TensorShape([1]): # We avoid using a Switch layer when mask.shape == [1]. This allows us to # use masks to train stand-alone models with path dropout. We can't use # Switch layers in this case because we need to maintain and update moving # average accumulators. if len(options) != 1: raise ValueError( 'Mask has shape [1] but options has length {:d}: {}' .format(len(options), options)) return Sequential([options[0], MultiplyByConstant(mask[0])], name=name) else: assert mask.shape.rank == 1, mask return Switch(mask, options, name) def get_conv_output_shape(input_shape, strides, output_filters): """Get output shape for conv/pooling ops with the 'SAME' padding scheme.""" if isinstance(strides, int): strides = (strides, strides) height_dim = tf.compat.dimension_value(input_shape[1]) if height_dim is not None: height_dim = int(math.ceil(height_dim / strides[0])) width_dim = tf.compat.dimension_value(input_shape[2]) if width_dim is not None: width_dim = int(math.ceil(width_dim / strides[1])) output_shape = tf.TensorShape( [input_shape[0], height_dim, width_dim, output_filters]) return output_shape class Conv2D(Layer): """2D convolution network layer.""" def __init__(self, filters, kernel_size, strides=(1, 1), dilation_rates=(1, 1), kernel_initializer=tf.initializers.he_normal(), kernel_regularizer=None, bias_initializer=tf.initializers.zeros(), bias_regularizer=None, use_bias=False, use_explicit_padding=False, name=None): super(Conv2D, self).__init__() self._filters = filters if isinstance(kernel_size, int): self._kernel_size = (kernel_size, kernel_size) else: self._kernel_size = kernel_size if isinstance(strides, int): self._strides = (strides, strides) else: self._strides = strides if isinstance(dilation_rates, int): self._dilation_rates = (dilation_rates, dilation_rates) else: self._dilation_rates = dilation_rates self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_initializer = bias_initializer self._bias_regularizer = bias_regularizer self._use_bias = use_bias self._name = name self._built = False self._use_explicit_padding = use_explicit_padding def build(self, input_shape): with tf.variable_scope(self._name, 'Conv2D') as scope: self._scope = scope input_filters = int(input_shape[-1]) kernel_shape = tuple(self._kernel_size) + (input_filters, self._filters) if not self._built: self._create_trainable_variable( name='kernel', shape=kernel_shape, initializer=self._kernel_initializer, regularizer=self._kernel_regularizer) if self._use_bias: self._create_trainable_variable( name='bias', shape=(self._filters,), initializer=self._bias_initializer, regularizer=self._bias_regularizer) output_shape = get_conv_output_shape( input_shape, self._strides, output_filters=self._filters) self._built = True return output_shape def apply(self, inputs, training): del training assert self._built with tf.variable_scope(self._scope, reuse=True): kernel = self._get_trainable_tensor('kernel') kernel = tf.cast(kernel, inputs.dtype) padding = 'SAME' if self._use_explicit_padding: padding = 'VALID' inputs = tf.pad( tensor=inputs, paddings=_compute_explicit_padding(self._kernel_size, self._dilation_rates)) result = tf.nn.conv2d( input=inputs, filters=kernel, strides=[1] + 
list(self._strides) + [1], padding=padding, dilations=[1] + list(self._dilation_rates) + [1]) if self._use_bias: bias = self._get_trainable_tensor('bias') bias = tf.cast(bias, result.dtype) return tf.nn.bias_add(result, bias) else: return result class DepthwiseConv2D(Layer): """2D depthwise convolution layer.""" def __init__(self, kernel_size, strides=(1, 1), dilation_rates=(1, 1), depthwise_initializer= depthwise_initializers.depthwise_he_normal(), depthwise_regularizer=None, use_explicit_padding=False, name=None): super(DepthwiseConv2D, self).__init__() if isinstance(kernel_size, int): self._kernel_size = (kernel_size, kernel_size) else: self._kernel_size = kernel_size if isinstance(strides, int): self._strides = (strides, strides) else: self._strides = strides if isinstance(dilation_rates, int): self._dilation_rates = (dilation_rates, dilation_rates) else: self._dilation_rates = dilation_rates # tf.nn.depthwise_conv2d restricts that if dilation rates are # greater than 1, then all strides must be equal to 1. if self._dilation_rates != (1, 1) and self._strides != (1, 1): raise ValueError( 'Non-unit dilations {0} can only be used with unit strides {1}. ' .format(self._dilation_rates, self._strides)) self._depthwise_initializer = depthwise_initializer self._depthwise_regularizer = depthwise_regularizer self._name = name self._built = False self._use_explicit_padding = use_explicit_padding def build(self, input_shape): with tf.variable_scope(self._name, 'DepthwiseConv2D') as scope: self._scope = scope input_filters = int(input_shape[-1]) kernel_shape = tuple(self._kernel_size) + (input_filters, 1) if not self._built: self._create_trainable_variable( name='kernel', shape=kernel_shape, initializer=self._depthwise_initializer, regularizer=self._depthwise_regularizer) output_shape = get_conv_output_shape( input_shape, self._strides, output_filters=int(input_shape[3])) self._built = True return output_shape def apply(self, inputs, training): del training assert self._built with tf.variable_scope(self._scope, reuse=True): kernel = self._get_trainable_tensor('kernel') kernel = tf.cast(kernel, inputs.dtype) padding = 'SAME' if self._use_explicit_padding: padding = 'VALID' inputs = tf.pad( tensor=inputs, paddings=_compute_explicit_padding(self._kernel_size, self._dilation_rates)) result = tf.nn.depthwise_conv2d( input=inputs, filter=kernel, strides=[1] + list(self._strides) + [1], padding=padding, dilations=list(self._dilation_rates)) return result class BatchNorm(Layer): """Abstract base class representing a batch normalization layer.""" def __init__(self, epsilon=1e-12, center=True, scale=True, beta_initializer=tf.initializers.zeros(), gamma_initializer=tf.initializers.ones(), momentum=0.99, stateful=True, name=None): super(BatchNorm, self).__init__() self._epsilon = epsilon self._center = center self._scale = scale self._beta_initializer = beta_initializer self._gamma_initializer = gamma_initializer self._momentum = momentum self._stateful = stateful self._name = name self._built = False def build(self, input_shape): with tf.variable_scope(self._name, 'BatchNorm') as scope: self._scope = scope if not self._built: if self._center: self._create_trainable_variable( name='beta', shape=[int(input_shape[-1])], dtype=tf.float32, initializer=self._beta_initializer) if self._scale: self._create_trainable_variable( name='gamma', shape=[int(input_shape[-1])], dtype=tf.float32, initializer=self._gamma_initializer) if self._stateful: self._create_moving_average_variable( name='moving_mean', 
shape=[int(input_shape[-1])], dtype=tf.float32, initializer=tf.initializers.zeros()) self._create_moving_average_variable( name='moving_variance', shape=[int(input_shape[-1])], dtype=tf.float32, initializer=tf.initializers.ones()) self._built = True return input_shape def apply(self, inputs, training): assert self._built with tf.variable_scope(self._scope, reuse=True): scale = self._get_trainable_tensor('gamma') if self._scale else None offset = self._get_trainable_tensor('beta') if self._center else None if self._stateful and not training: moving_mean = self._get_moving_average_variable('moving_mean') moving_variance = self._get_moving_average_variable('moving_variance') else: moving_mean = None moving_variance = None result, mean, variance = tf.nn.fused_batch_norm( inputs, scale=scale, offset=offset, mean=moving_mean, variance=moving_variance, epsilon=self._epsilon, is_training=training or not self._stateful) if self._stateful and training: self._update_moving_average_variable( 'moving_mean', mean, self._momentum) self._update_moving_average_variable( 'moving_variance', variance, self._momentum) return result def _regularizer_over_masked_variable(regularizer, mask, transpose=False): """return a regularizer over the masked tensor.""" if regularizer is None: return None def compute_regularizer(value): if transpose: value = tf.transpose(value, perm=[0, 1, 3, 2]) return regularizer(value * mask) return compute_regularizer class MaskedDepthwiseConv2D(Layer): """2D masked depthwise convolution layer.""" def __init__(self, kernel_size, mask, strides=(1, 1), depthwise_initializer= depthwise_initializers.depthwise_he_normal(), depthwise_regularizer=None, transpose_depthwise_kernels=False, use_explicit_padding=False, name=None): super(MaskedDepthwiseConv2D, self).__init__() if isinstance(kernel_size, int): self._kernel_size = (kernel_size, kernel_size) else: self._kernel_size = kernel_size if isinstance(strides, int): self._strides = (strides, strides) else: self._strides = strides self._depthwise_initializer = depthwise_initializer self._depthwise_regularizer = depthwise_regularizer self._name = name self._built = False self._transpose_depthwise_kernels = transpose_depthwise_kernels self._use_explicit_padding = use_explicit_padding # NOTE(gbender, hanxiaol): Be careful that TF might try to back-propagate # through the masks and cause issues with the 'Switch' statements (which # use custom gradients). A potential solution would be integrating the logic # of masks also into custom gradients. self._mask = tf.reshape(mask, [1, 1, -1, 1]) def build(self, input_shape): with tf.variable_scope(self._name, 'MaskedDepthwiseConv2D') as scope: self._scope = scope max_filters = int(self._mask.shape[2]) if int(input_shape[-1]) != max_filters: raise ValueError( 'padded input filter size ({:d}) must match the max possible ' 'effective filter size ({:d}).' 
.format(int(input_shape[-1]), max_filters)) if not self._built: mask = tf.stop_gradient(self._mask) masked_depthwise_regularizer = _regularizer_over_masked_variable( self._depthwise_regularizer, mask, transpose=self._transpose_depthwise_kernels) if self._transpose_depthwise_kernels: kernel_shape = tuple(self._kernel_size) + (1, max_filters) depthwise_initializer = custom_layers.TransposedInitializer( self._depthwise_initializer) else: kernel_shape = tuple(self._kernel_size) + (max_filters, 1) depthwise_initializer = self._depthwise_initializer self._create_trainable_variable( name='kernel', shape=kernel_shape, initializer=depthwise_initializer, regularizer=masked_depthwise_regularizer) output_shape = get_conv_output_shape( input_shape, self._strides, output_filters=max_filters) self._built = True return output_shape def apply(self, inputs, training): del training assert self._built with tf.variable_scope(self._scope, reuse=True): kernel = self._get_trainable_tensor('kernel') if self._transpose_depthwise_kernels: # Transpose the depthwise kernel back to the right shape. kernel = tf.transpose(kernel, perm=[0, 1, 3, 2]) mask = tf.stop_gradient(self._mask) masked_kernel = kernel * mask masked_kernel = tf.cast(masked_kernel, inputs.dtype) padding = 'SAME' if self._use_explicit_padding: padding = 'VALID' inputs = tf.pad( tensor=inputs, paddings=_compute_explicit_padding(self._kernel_size, (1, 1))) result = tf.nn.depthwise_conv2d( inputs, masked_kernel, strides=(1,) + tuple(self._strides) + (1,), padding=padding) return result class MaskedConv2D(Layer): """2D masked convolution network layer.""" def __init__(self, kernel_size, input_mask, output_mask, strides=(1, 1), kernel_initializer=tf.initializers.he_normal(), kernel_regularizer=None, bias_initializer=tf.initializers.zeros(), bias_regularizer=None, use_bias=False, use_explicit_padding=False, name=None): super(MaskedConv2D, self).__init__() if isinstance(kernel_size, int): self._kernel_size = (kernel_size, kernel_size) else: self._kernel_size = kernel_size if isinstance(strides, int): self._strides = (strides, strides) else: self._strides = strides self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._bias_initializer = bias_initializer self._bias_regularizer = bias_regularizer self._use_bias = use_bias self._name = name self._built = False self._use_explicit_padding = use_explicit_padding if input_mask is None: self._input_mask = None else: self._input_mask = tf.reshape(input_mask, [1, 1, -1, 1]) self._output_mask = tf.reshape(output_mask, [1, 1, 1, -1]) if use_bias: self._bias_mask = output_mask def build(self, input_shape): with tf.variable_scope(self._name, 'MaskedConv2D') as scope: self._scope = scope if self._input_mask is None: max_input_filters = int(input_shape[-1]) else: max_input_filters = int(self._input_mask.shape[2]) max_output_filters = int(self._output_mask.shape[3]) if int(input_shape[-1]) != max_input_filters: raise ValueError( 'padded input filter size ({:d}) must match the max possible ' 'effective input filter size ({:d}) in scope: {}.' 
.format(int(input_shape[-1]), max_input_filters, scope.name)) if not self._built: mask = tf.stop_gradient(self._output_mask) if self._input_mask is not None: input_mask = tf.stop_gradient(self._input_mask) mask = mask * input_mask masked_kernel_regularizer = _regularizer_over_masked_variable( self._kernel_regularizer, mask) kernel_shape = tuple(self._kernel_size) + ( max_input_filters, max_output_filters) self._create_trainable_variable( name='kernel', shape=kernel_shape, initializer=self._kernel_initializer, regularizer=masked_kernel_regularizer) if self._use_bias: masked_bias_regularizer = _regularizer_over_masked_variable( self._bias_regularizer, self._bias_mask) self._create_trainable_variable( name='bias', shape=(max_output_filters,), initializer=self._bias_initializer, regularizer=masked_bias_regularizer) output_shape = get_conv_output_shape( input_shape, self._strides, output_filters=max_output_filters) self._built = True return output_shape def apply(self, inputs, training): del training assert self._built with tf.variable_scope(self._scope, reuse=True): kernel = self._get_trainable_tensor('kernel') mask = tf.stop_gradient(self._output_mask) if self._input_mask is not None: input_mask = tf.stop_gradient(self._input_mask) mask = mask * input_mask masked_kernel = kernel * mask masked_kernel = tf.cast(masked_kernel, inputs.dtype) padding = 'SAME' if self._use_explicit_padding: padding = 'VALID' inputs = tf.pad( tensor=inputs, paddings=_compute_explicit_padding(self._kernel_size, (1, 1))) result = tf.nn.conv2d( inputs, masked_kernel, strides=[1] + list(self._strides) + [1], padding=padding) if self._use_bias: bias = self._get_trainable_tensor('bias') bias_mask = tf.stop_gradient(self._bias_mask) masked_bias = bias * bias_mask masked_bias = tf.cast(masked_bias, result.dtype) return tf.nn.bias_add(result, masked_bias) else: return result class MaskedStatelessBatchNorm(Layer): """Masked stateless batch normalization layer.""" def __init__(self, mask, epsilon=1e-12, center=True, scale=True, beta_initializer=tf.initializers.zeros(), gamma_initializer=tf.initializers.ones(), name=None): super(MaskedStatelessBatchNorm, self).__init__() self._epsilon = epsilon self._center = center self._scale = scale self._beta_initializer = beta_initializer self._gamma_initializer = gamma_initializer self._name = name self._built = False self._mask = tf.reshape(mask, [1, 1, 1, -1]) def build(self, input_shape): with tf.variable_scope(self._name, 'MaskedStatelessBatchNorm') as scope: self._scope = scope max_filters = int(self._mask.shape[3]) if int(input_shape[-1]) != max_filters: raise ValueError( 'padded input filter size ({:d}) must match the max possible ' 'effective filter size ({:d}).' 
.format(int(input_shape[-1]), max_filters)) if not self._built: if self._center: self._create_trainable_variable( name='beta', shape=[int(input_shape[-1])], dtype=tf.float32, initializer=self._beta_initializer) if self._scale: self._create_trainable_variable( name='gamma', shape=[int(input_shape[-1])], dtype=tf.float32, initializer=self._gamma_initializer) self._built = True return input_shape def apply(self, inputs, training): del training assert self._built with tf.variable_scope(self._scope, reuse=True): scale = self._get_trainable_tensor('gamma') if self._scale else None offset = self._get_trainable_tensor('beta') if self._center else None mask = tf.stop_gradient(self._mask) mask = tf.cast(mask, inputs.dtype) result, unused_mean, unused_var = tf.nn.fused_batch_norm( inputs, scale=scale, offset=offset, epsilon=self._epsilon, is_training=True) result = result * mask return result def create_mask(choices, selection): """Create a 1-dimensional mask for the given choices and selection.""" # Opt out tf.gather if there's only a single option if len(choices) == 1: k = choices[0] else: k = tf.gather(choices, selection) n = max(choices) mask = tf.sequence_mask(k, n, dtype=tf.float32) return mask
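

# The sketch below is not part of the original tunas file; it is a minimal,
# hedged illustration of how `create_mask` feeds the masked layers defined
# above. The names `inputs`, `selection`, and `choices` are placeholders, and
# it assumes the TF1-style graph mode the rest of this module is written for.
def _example_masked_conv(inputs, selection, choices=(16, 24, 32)):
  """Applies a 3x3 MaskedConv2D whose effective width is picked at run time."""
  # 1.0/0.0 vector of length max(choices); the first `choices[selection]`
  # entries are ones, so the unused output filters are zeroed out.
  mask = create_mask(list(choices), selection)
  layer = MaskedConv2D(kernel_size=3, input_mask=None, output_mask=mask)
  # build() allocates a kernel for the maximum possible width, max(choices).
  layer.build(inputs.shape)
  # apply() multiplies the kernel by the mask before convolving.
  return layer.apply(inputs, training=True)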
google-research/google-research
tunas/rematlib/layers.py
Python
apache-2.0
57,585
[ "Gaussian" ]
b47cc05f246bf660dd8c21911c7afa09516312ffdcf53c6b6b55bb445fb682d9
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # Ocean Manager Tool # # Copyright (C) 2015 Bitergia # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Authors: # Alvaro del Castillo San Felix <acs@bitergia.com> # import argparse from datetime import datetime import logging import requests import sys from grimoire.elk.elastic import ElasticSearch, ElasticConnectException, ElasticWriteException from grimoire.ocean.elastic import ElasticOcean from grimoire.ocean.conf import ConfOcean def get_elastic(): try: ocean_index = ConfOcean.get_index() elastic_ocean = ElasticSearch(args.elastic_url, ocean_index) except ElasticConnectException: logging.error("Can't connect to Elastic Search. Is it running?") sys.exit(1) except ElasticWriteException: logging.error("Can't write to Elastic Search.") sys.exit(1) return elastic_ocean def get_params(): ''' Get params definition from ElasticOcean ''' parser = argparse.ArgumentParser() ElasticOcean.add_params(parser) # Commands supported parser.add_argument("-l", "--list", action='store_true', help="Lists repositories") parser.add_argument("-r", "--remove", help="Remove a repository") # parser.add_argument("--rename", # help="Rename a repository") args = parser.parse_args() return args def list_repos_ids(): logging.debug("Listing repos ids") elastic = get_elastic() ConfOcean.set_elastic(elastic) for repo_id in ConfOcean.get_repos_ids(): print(repo_id) def list_repos(): logging.debug("Listing repos") elastic = get_elastic() ConfOcean.set_elastic(elastic) for repo_id in ConfOcean.get_repos_ids(): elastic = get_elastic() url = elastic.index_url + "/repos/" + repo_id r = requests.get(url) repo = r.json()['_source'] print ("%s %s %s" % (repo_id, repo['repo_update'], repo['success'])) def remove_repo(repo_id): logging.info("Removing repo: %s" % (repo_id)) elastic = get_elastic() url = elastic.index_url + "/repos/" + repo_id r = requests.delete(url) if r.status_code == 200: logging.info("Done") else: logging.error("Can not remove %s (%i)" % (repo_id, r.status_code)) def config_logging(debug): if debug: logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s') logging.debug("Debug mode activated") else: logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("requests").setLevel(logging.WARNING) if __name__ == '__main__': app_init = datetime.now() args = get_params() config_logging(args.debug) if args.list: list_repos() elif args.remove: remove_repo(args.remove)
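
# Usage notes (a hedged sketch, not part of the original tool). The exact
# Elasticsearch-URL flag is registered by ElasticOcean.add_params(), so check
# `python ocean.py --help` for its real name; the commands below use a
# placeholder for it, for illustration only:
#
#   python ocean.py <elastic-url-flags> --list         # one line per repo: id, update date, success
#   python ocean.py <elastic-url-flags> -r <repo_id>   # delete that repo document
#
# Both commands read/write the Ocean configuration index chosen by
# ConfOcean.get_index() on the given Elasticsearch instance.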
sanacl/GrimoireELK
utils/ocean.py
Python
gpl-3.0
3,589
[ "Elk" ]
0e9768df70fa12868a5bbe2cec853e3ba28c0f960eed3b006c0596e3569df9b8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Multivariate Normal distribution classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.contrib.distributions.python.ops import distribution from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky from tensorflow.contrib.distributions.python.ops import operator_pd_full from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops __all__ = [ "MultivariateNormalCholesky", "MultivariateNormalFull", ] class MultivariateNormalOperatorPD(distribution.Distribution): """The multivariate normal distribution on `R^k`. This distribution is defined by a 1-D mean `mu` and an instance of `OperatorPDBase`, which provides access to a symmetric positive definite operator, which defines the covariance. #### Mathematical details The PDF of this distribution is: ``` f(x) = (2*pi)^(-k/2) |det(sigma)|^(-1/2) exp(-1/2*(x-mu)^*.sigma^{-1}.(x-mu)) ``` where `.` denotes the inner product on `R^k` and `^*` denotes transpose. #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and a covariance matrix of shape `k x k`. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian. mu = [1, 2, 3] chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]] cov = tf.contrib.distributions.OperatorPDCholesky(chol) dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1.]) # Initialize a batch of two 3-variate Gaussians. mu = [[1, 2, 3], [11, 22, 33.]] chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal. cov = tf.contrib.distributions.OperatorPDCholesky(chol) dist = tf.contrib.distributions.MultivariateNormalOperatorPD(mu, cov) # Evaluate this on a two observations, each in R^3, returning a length two # tensor. x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3. dist.pdf(x) ``` """ def __init__( self, mu, cov, allow_nan=False, strict=True, strict_statistics=True, name="MultivariateNormalCov"): """Multivariate Normal distributions on `R^k`. User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`, which determines the covariance. Args: mu: `float` or `double` tensor with shape `[N1,...,Nb, k]`, `b >= 0`. 
cov: `float` or `double` instance of `OperatorPDBase` with same `dtype` as `mu` and shape `[N1,...,Nb, k, k]`. allow_nan: Boolean, default False. If False, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If True, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. strict: Whether to validate input with asserts. If `strict` is `False`, and the inputs are invalid, correct behavior is not guaranteed. strict_statistics: Boolean, default True. If True, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If False, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. Raises: TypeError: If `mu` and `cov` are different dtypes. """ self._strict_statistics = strict_statistics self._strict = strict with ops.name_scope(name): with ops.op_scope([mu] + cov.inputs, "init"): self._cov = cov self._mu = self._check_mu(mu) self._name = name def _check_mu(self, mu): """Return `mu` after validity checks and possibly with assertations.""" mu = ops.convert_to_tensor(mu) cov = self._cov if mu.dtype != cov.dtype: raise TypeError( "mu and cov must have the same dtype. Found mu.dtype = %s, " "cov.dtype = %s" % (mu.dtype, cov.dtype)) if not self.strict: return mu else: assert_compatible_shapes = control_flow_ops.group( check_ops.assert_equal( array_ops.rank(mu) + 1, cov.rank(), data=["mu should have rank 1 less than cov. Found: rank(mu) = ", array_ops.rank(mu), " rank(cov) = ", cov.rank()], ), check_ops.assert_equal( array_ops.shape(mu), cov.vector_shape(), data=["mu.shape and cov.shape[:-1] should match. " "Found: shape(mu) = " , array_ops.shape(mu), " shape(cov) = ", cov.shape()], ), ) return control_flow_ops.with_dependencies([assert_compatible_shapes], mu) @property def strict(self): """Boolean describing behavior on invalid input.""" return self._strict @property def strict_statistics(self): """Boolean describing behavior when a stat is undefined for batch member.""" return self._strict_statistics @property def dtype(self): return self._mu.dtype def get_event_shape(self): """`TensorShape` available at graph construction time.""" # Recall _check_mu ensures mu and self._cov have same batch shape. return self._cov.get_shape()[-1:] def event_shape(self, name="event_shape"): """Shape of a sample from a single distribution as a 1-D int32 `Tensor`.""" # Recall _check_mu ensures mu and self._cov have same batch shape. with ops.name_scope(self.name): with ops.op_scope(self._cov.inputs, name): return array_ops.pack([self._cov.vector_space_dimension()]) def batch_shape(self, name="batch_shape"): """Batch dimensions of this instance as a 1-D int32 `Tensor`.""" # Recall _check_mu ensures mu and self._cov have same batch shape. with ops.name_scope(self.name): with ops.op_scope(self._cov.inputs, name): return self._cov.batch_shape() def get_batch_shape(self): """`TensorShape` available at graph construction time.""" # Recall _check_mu ensures mu and self._cov have same batch shape. 
    return self._cov.get_batch_shape()

  @property
  def mu(self):
    return self._mu

  @property
  def sigma(self):
    """Dense (batch) covariance matrix, if available."""
    with ops.name_scope(self.name):
      return self._cov.to_dense()

  def mean(self, name="mean"):
    """Mean of each batch member."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu], name):
        return array_ops.identity(self._mu)

  def mode(self, name="mode"):
    """Mode of each batch member."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu], name):
        return array_ops.identity(self._mu)

  def variance(self, name="variance"):
    """Variance of each batch member."""
    with ops.name_scope(self.name):
      return self.sigma

  def log_sigma_det(self, name="log_sigma_det"):
    """Log of determinant of covariance matrix."""
    with ops.name_scope(self.name):
      with ops.op_scope(self._cov.inputs, name):
        return self._cov.log_det()

  def sigma_det(self, name="sigma_det"):
    """Determinant of covariance matrix."""
    with ops.name_scope(self.name):
      with ops.op_scope(self._cov.inputs, name):
        return math_ops.exp(self._cov.log_det())

  def log_prob(self, x, name="log_prob"):
    """Log prob of observations `x` given these Multivariate Normals.

    `x` is a batch vector with compatible shape if `x` is a `Tensor` whose
    shape can be broadcast up to either:

    ```
    self.batch_shape + self.event_shape
    OR
    [M1,...,Mm] + self.batch_shape + self.event_shape
    ```

    Args:
      x: Compatible batch vector with same `dtype` as this distribution.
      name: The name to give this op.

    Returns:
      log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
    """
    # Q: Why are shape requirements as stated above?
    # A: The compatible shapes are precisely the ones that will broadcast to
    #    a shape compatible with self._cov.
    # See Operator base class for notes about shapes compatible with self._cov.
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, x] + self._cov.inputs, name):
        x = ops.convert_to_tensor(x)
        contrib_tensor_util.assert_same_float_dtype((self._mu, x))

        # _check_mu asserts that self.mu has same batch shape as self.cov.
        # so batch shape of self.mu = that of self._cov and self, and the
        # batch shape of x_centered is a broadcast version of these. If this
        # broadcast results in a shape like
        # [M1,...,Mm] + self.batch_shape + self.event_shape
        # OR
        # self.batch_shape + self.event_shape
        # then subsequent operator calls are guaranteed to work.
        x_centered = x - self.mu

        # Compute the quadratic form (x - mu)^T sigma^{-1} (x - mu), which
        # appears in the exponent of the pdf.
        x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)

        log_sigma_det = self.log_sigma_det()
        log_two_pi = constant_op.constant(
            math.log(2 * math.pi), dtype=self.dtype)
        k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
        log_prob_value = -(log_sigma_det + k * log_two_pi + x_whitened_norm) / 2

        output_static_shape = x_centered.get_shape()[:-1]
        log_prob_value.set_shape(output_static_shape)
        return log_prob_value

  def prob(self, x, name="prob"):
    """The PDF of observations `x` under these Multivariate Normals.

    `x` is a batch vector with compatible shape if `x` is a `Tensor` whose
    shape can be broadcast up to either:

    ```
    self.batch_shape + self.event_shape
    OR
    [M1,...,Mm] + self.batch_shape + self.event_shape
    ```

    Args:
      x: Compatible batch vector with same `dtype` as this distribution.
      name: The name to give this op.

    Returns:
      prob: tensor of dtype `dtype`, the prob values of `x`.
""" with ops.name_scope(self.name): with ops.op_scope([self._mu, x] + self._cov.inputs, name): return math_ops.exp(self.log_prob(x)) def entropy(self, name="entropy"): """The entropies of these Multivariate Normals. Args: name: The name to give this op. Returns: entropy: tensor of dtype `dtype`, the entropies. """ with ops.name_scope(self.name): with ops.op_scope([self._mu] + self._cov.inputs, name): log_sigma_det = self.log_sigma_det() one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi), dtype=self.dtype) # Use broadcasting rules to calculate the full broadcast sigma. k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype) entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2 entropy_value.set_shape(log_sigma_det.get_shape()) return entropy_value def sample(self, n, seed=None, name="sample"): """Sample `n` observations from the Multivariate Normal Distributions. Args: n: `Scalar`, type int32, the number of observations to sample. seed: Python integer, the random seed. name: The name to give this op. Returns: samples: `[n, ...]`, a `Tensor` of `n` samples for each of the distributions determined by broadcasting the hyperparameters. """ with ops.name_scope(self.name): with ops.op_scope([self._mu, n] + self._cov.inputs, name): # Recall _check_mu ensures mu and self._cov have same batch shape. broadcast_shape = self.mu.get_shape() n = ops.convert_to_tensor(n) shape = array_ops.concat(0, [self._cov.vector_shape(), [n]]) white_samples = random_ops.random_normal(shape=shape, mean=0, stddev=1, dtype=self.dtype, seed=seed) correlated_samples = self._cov.sqrt_matmul(white_samples) # Move the last dimension to the front perm = array_ops.concat(0, ( array_ops.pack([array_ops.rank(correlated_samples) - 1]), math_ops.range(0, array_ops.rank(correlated_samples) - 1))) # TODO(ebrevdo): Once we get a proper tensor contraction op, # perform the inner product using that instead of batch_matmul # and this slow transpose can go away! correlated_samples = array_ops.transpose(correlated_samples, perm) samples = correlated_samples + self.mu # Provide some hints to shape inference n_val = tensor_util.constant_value(n) final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape) samples.set_shape(final_shape) return samples @property def is_reparameterized(self): return True @property def name(self): return self._name @property def is_continuous(self): return True class MultivariateNormalCholesky(MultivariateNormalOperatorPD): """The multivariate normal distribution on `R^k`. This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`. Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling, and requires `O(k^2)` storage. #### Mathematical details The PDF of this distribution is: ``` f(x) = (2*pi)^(-k/2) |det(sigma)|^(-1/2) exp(-1/2*(x-mu)^*.sigma^{-1}.(x-mu)) ``` where `.` denotes the inner product on `R^k` and `^*` denotes transpose. #### Examples A single multi-variate Gaussian distribution is defined by a vector of means of length `k`, and a covariance matrix of shape `k x k`. Extra leading dimensions, if provided, allow for batches. ```python # Initialize a single 3-variate Gaussian with diagonal covariance. mu = [1, 2, 3.] chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]] dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol) # Evaluate this on an observation in R^3, returning a scalar. dist.pdf([-1, 0, 1]) # Initialize a batch of two 3-variate Gaussians. mu = [[1, 2, 3], [11, 22, 33]] chol = ... 
  # shape 2 x 3 x 3, lower triangular, positive diagonal.
  dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)

  # Evaluate this on two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11]]  # Shape 2 x 3.
  dist.pdf(x)
  ```

  Trainable (batch) Cholesky matrices can be created with
  `tf.contrib.distributions.batch_matrix_diag_transform()`
  """

  def __init__(
      self,
      mu,
      chol,
      strict=True,
      strict_statistics=True,
      name="MultivariateNormalCholesky"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu` and `chol` which holds the (batch) Cholesky
    factors `S`, such that the covariance of each batch member is `S S^*`.

    Args:
      mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
        `b >= 0`.
      chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
        `[N1,...,Nb, k, k]`.
      strict: Whether to validate input with asserts. If `strict` is `False`,
        and the inputs are invalid, correct behavior is not guaranteed.
      strict_statistics: Boolean, default True. If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: If `mu` and `chol` are different dtypes.
    """
    cov = operator_pd_cholesky.OperatorPDCholesky(chol, verify_pd=strict)
    super(MultivariateNormalCholesky, self).__init__(
        mu, cov, strict_statistics=strict_statistics, strict=strict, name=name)


class MultivariateNormalFull(MultivariateNormalOperatorPD):
  """The multivariate normal distribution on `R^k`.

  This distribution is defined by a 1-D mean `mu` and covariance matrix
  `sigma`. Evaluation of the pdf, determinant, and sampling are all `O(k^3)`
  operations.

  #### Mathematical details

  The PDF of this distribution is:

  ```
  f(x) = (2*pi)^(-k/2) |det(sigma)|^(-1/2) exp(-1/2*(x-mu)^*.sigma^{-1}.(x-mu))
  ```

  where `.` denotes the inner product on `R^k` and `^*` denotes transpose.

  #### Examples

  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and a covariance matrix of shape `k x k`.

  Extra leading dimensions, if provided, allow for batches.

  ```python
  # Initialize a single 3-variate Gaussian with diagonal covariance.
  mu = [1, 2, 3.]
  sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)

  # Evaluate this on an observation in R^3, returning a scalar.
  dist.pdf([-1, 0, 1])

  # Initialize a batch of two 3-variate Gaussians.
  mu = [[1, 2, 3], [11, 22, 33.]]
  sigma = ...  # shape 2 x 3 x 3, positive definite.
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)

  # Evaluate this on two observations, each in R^3, returning a length two
  # tensor.
  x = [[-1, 0, 1], [-11, 0, 11.]]  # Shape 2 x 3.
  dist.pdf(x)
  ```
  """

  def __init__(
      self,
      mu,
      sigma,
      strict=True,
      strict_statistics=True,
      name="MultivariateNormalFull"):
    """Multivariate Normal distributions on `R^k`.

    User must provide means `mu` and `sigma`, the mean and covariance.

    Args:
      mu: `(N+1)-D` `float` or `double` tensor with shape `[N1,...,Nb, k]`,
        `b >= 0`.
      sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
        `[N1,...,Nb, k, k]`.
      strict: Whether to validate input with asserts. If `strict` is `False`,
        and the inputs are invalid, correct behavior is not guaranteed.
      strict_statistics: Boolean, default True. If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
If False, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: The name to give Ops created by the initializer. Raises: TypeError: If `mu` and `sigma` are different dtypes. """ cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=strict) super(MultivariateNormalFull, self).__init__( mu, cov, strict_statistics=strict_statistics, strict=strict, name=name)
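

# A short usage sketch (not part of the original module, added for
# illustration): it exercises the public surface defined above on a single
# 2-variate Gaussian. The returned values are graph tensors and must be
# evaluated inside a Session, per the TF 0.x-era API this contrib code targets.
def _example_mvn_usage():
  """Builds a 2-variate MultivariateNormalFull and a few ops on it."""
  mu = [0., 0.]
  sigma = [[1., 0.5],
           [0.5, 2.]]
  dist = MultivariateNormalFull(mu, sigma)
  samples = dist.sample(100, seed=42)  # Tensor of shape [100, 2].
  log_p = dist.log_prob([[0., 0.]])    # Log-pdf at the mean, shape [1].
  # entropy == (k * (1 + log(2*pi)) + log|sigma|) / 2, matching `entropy` above.
  entropy = dist.entropy()
  return samples, log_p, entropy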
HaebinShin/tensorflow
tensorflow/contrib/distributions/python/ops/mvn.py
Python
apache-2.0
19,827
[ "Gaussian" ]
3d8d64ed39e0b3d891c13a0dd98865e37daf23ee51ad95dce71d3bcc97ea1e6a
#! /usr/bin/python # # This is amber2lammps, a program written by Keir E. Novik to convert # Amber files to Lammps files. # # Copyright 1999, 2000 Keir E. Novik; all rights reserved. # # Modified by Vikas Varshney, U Akron, 5 July 2005, as described in README # #============================================================ def Pop(S, I=-1): 'Pop item I from list' X = S[I] del S[I] return X #============================================================ class Lammps: #-------------------------------------------------------- def Dump(self): 'Write out contents of self (intended for debugging)' Name_list = self.__dict__.keys() Name_list.sort() for Name in Name_list: print Name + ':', self.__dict__[Name] #-------------------------------------------------------- def Write_data(self, Basename, Item_list): 'Write the Lammps data to file (used by Write_Lammps)' import os, sys Filename = 'data.' + Basename Dir_list = os.listdir('.') i = 1 while Filename in Dir_list: Filename = 'data' + `i` + '.' + Basename i = i +1 del i print 'Writing', Filename + '...', sys.stdout.flush() try: F = open(Filename, 'w') except IOError, Detail: print '(error:', Detail[1] + '!)' return try: F.writelines(Item_list) except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return F.close() print 'done.' #-------------------------------------------------------- def Write_Lammps(self, Basename): 'Write the Lammps data file, ignoring blank sections' import string L = [] L.append('LAMMPS data file for ' + self.name + '\n\n') L.append(`self.atoms` + ' atoms\n') L.append(`self.bonds` + ' bonds\n') L.append(`self.angles` + ' angles\n') L.append(`self.dihedrals` + ' dihedrals\n') L.append(`self.impropers` + ' impropers\n\n') L.append(`self.atom_types` + ' atom types\n') if self.bonds > 0: L.append(`self.bond_types` + ' bond types\n') if self.angles > 0: L.append(`self.angle_types` + ' angle types\n') if self.dihedrals > 0: L.append(`self.dihedral_types` + ' dihedral types\n') L.append('\n') L.append(`self.xlo` + ' ' + `self.xhi` + ' xlo xhi\n') L.append(`self.ylo` + ' ' + `self.yhi` + ' ylo yhi\n') L.append(`self.zlo` + ' ' + `self.zhi` + ' zlo zhi\n\n') if self.atom_types != 0: L.append('Masses\n\n') for i in range(self.atom_types): L.append(`i+1` + ' ' + `self.Masses[i]` + '\n') L.append('\n') L.append('Pair Coeffs\n\n') for i in range(self.atom_types): L.append(`i+1`) for j in range(len(self.Nonbond_Coeffs[0])): L.append(' ' + `self.Nonbond_Coeffs[i][j]`) L.append('\n') L.append('\n') if self.bonds != 0 and self.bond_types != 0: L.append('Bond Coeffs\n\n') for i in range(self.bond_types): L.append(`i+1`) for j in range(len(self.Bond_Coeffs[0])): L.append(' ' + `self.Bond_Coeffs[i][j]`) L.append('\n') L.append('\n') if self.angles != 0 and self.angle_types != 0: L.append('Angle Coeffs\n\n') for i in range(self.angle_types): L.append(`i+1`) for j in range(len(self.Angle_Coeffs[0])): L.append(' ' + `self.Angle_Coeffs[i][j]`) L.append('\n') L.append('\n') if self.dihedrals != 0 and self.dihedral_types != 0: L.append('Dihedral Coeffs\n\n') for i in range(self.dihedral_types): L.append(`i+1`) for j in range(len(self.Dihedral_Coeffs[0])): L.append(' ' + `self.Dihedral_Coeffs[i][j]`) L.append('\n') L.append('\n') if self.atoms != 0: L.append('Atoms\n\n') for i in range(self.atoms): L.append(`i+1`) for j in range(len(self.Atoms[0])): L.append(' ' + `self.Atoms[i][j]`) L.append('\n') L.append('\n') if self.bonds != 0 and self.bond_types != 0: L.append('Bonds\n\n') for i in range(self.bonds): L.append(`i+1`) for j in 
range(len(self.Bonds[0])): L.append(' ' + `self.Bonds[i][j]`) L.append('\n') L.append('\n') if self.angles != 0 and self.angle_types != 0: L.append('Angles\n\n') for i in range(self.angles): L.append(`i+1`) for j in range(len(self.Angles[0])): L.append(' ' + `self.Angles[i][j]`) L.append('\n') L.append('\n') if self.dihedrals != 0 and self.dihedral_types != 0: L.append('Dihedrals\n\n') for i in range(self.dihedrals): L.append(`i+1`) for j in range(len(self.Dihedrals[0])): L.append(' ' + `self.Dihedrals[i][j]`) L.append('\n') L.append('\n') self.Write_data(Basename, L) #============================================================ class Amber: def __init__(self): 'Initialise the Amber class' self.CRD_is_read = 0 self.TOP_is_read = 0 #-------------------------------------------------------- def Dump(self): 'Write out contents of self (intended for debugging)' Name_list = self.__dict__.keys() Name_list.sort() for Name in Name_list: print Name + ':', self.__dict__[Name] #-------------------------------------------------------- def Coerce_to_Lammps(self): 'Return the Amber data converted to Lammps format' import math if self.CRD_is_read and self.TOP_is_read: l = Lammps() print 'Converting...', l.name = self.ITITL l.atoms = self.NATOM l.bonds = self.NBONH + self.MBONA l.angles = self.NTHETH + self.MTHETA l.dihedrals = self.NPHIH + self.MPHIA l.impropers = 0 l.atom_types = self.NTYPES l.bond_types = self.NUMBND l.angle_types = self.NUMANG l.dihedral_types = self.NPTRA Shift = 0 if self.__dict__.has_key('BOX'): l.xlo = 0.0 l.xhi = self.BOX[0] l.ylo = 0.0 l.yhi = self.BOX[1] l.zlo = 0.0 l.zhi = self.BOX[2] if (l.xlo > min(self.X)) or (l.xhi < max(self.X)) or \ (l.ylo > min(self.Y)) or (l.yhi < max(self.Y)) or \ (l.zlo > min(self.Z)) or (l.zhi < max(self.Z)): # Vikas Modification: Disabling Shifting. This means I am intend to send exact coordinates of each atom and let LAMMPS # take care of imaging into periodic image cells. If one wants to shift all atoms in the periodic box, # please uncomment the below 2 lines. 
print '(warning: Currently not shifting the atoms to the periodic box)' #Shift = 1 else: print '(warning: Guessing at periodic box!)', l.xlo = min(self.X) l.xhi = max(self.X) l.ylo = min(self.Y) l.yhi = max(self.Y) l.zlo = min(self.Z) l.zhi = max(self.Z) # This doesn't check duplicate values l.Masses = [] for i in range(l.atom_types): l.Masses.append(0) for i in range(self.NATOM): l.Masses[self.IAC[i] - 1] = self.AMASS[i] l.Nonbond_Coeffs = [] for i in range(self.NTYPES): l.Nonbond_Coeffs.append([0,0]) for i in range(self.NTYPES): j = self.ICO[i * (self.NTYPES + 1)] - 1 if self.CN1[j] == 0.0: l.Nonbond_Coeffs[i][0] = 0.0 else: l.Nonbond_Coeffs[i][0] = \ 0.25 * (self.CN2[j])**2 / self.CN1[j] if self.CN2[j] == 0.0: l.Nonbond_Coeffs[i][1] = 0.0 else: l.Nonbond_Coeffs[i][1] = \ (self.CN1[j] / self.CN2[j])**(1.0/6.0) l.Bond_Coeffs = [] for i in range(self.NUMBND): l.Bond_Coeffs.append([0,0]) for i in range(self.NUMBND): l.Bond_Coeffs[i][0] = self.RK[i] l.Bond_Coeffs[i][1] = self.REQ[i] l.Angle_Coeffs = [] for i in range(self.NUMANG): l.Angle_Coeffs.append([0,0]) for i in range(self.NUMANG): l.Angle_Coeffs[i][0] = self.TK[i] l.Angle_Coeffs[i][1] = (180/math.pi) * self.TEQ[i] l.Dihedral_Coeffs = [] for i in range(self.NPTRA): l.Dihedral_Coeffs.append([0,0,0]) for i in range(self.NPTRA): l.Dihedral_Coeffs[i][0] = self.PK[i] if self.PHASE[i] == 0: l.Dihedral_Coeffs[i][1] = 1 else: l.Dihedral_Coeffs[i][1] = -1 l.Dihedral_Coeffs[i][2] = int(self.PN[i]) l.Atoms = [] for i in range(self.NATOM): x = self.X[i] y = self.Y[i] z = self.Z[i] if Shift: while x < l.xlo: x = x + self.BOX[0] while x > l.xhi: x = x - self.BOX[0] while y < l.ylo: y = y + self.BOX[1] while y > l.yhi: y = y - self.BOX[1] while z < l.zlo: z = z + self.BOX[2] while z > l.zhi: z = z - self.BOX[2] l.Atoms.append([0, self.IAC[i], self.CHRG[i]/18.2223, \ x, y, z]) l.Bonds = [] for i in range(l.bonds): l.Bonds.append([0,0,0]) for i in range(self.NBONH): l.Bonds[i][0] = self.ICBH[i] l.Bonds[i][1] = abs(self.IBH[i])/3 + 1 l.Bonds[i][2] = abs(self.JBH[i])/3 + 1 for i in range(self.NBONA): l.Bonds[self.NBONH + i][0] = self.ICB[i] l.Bonds[self.NBONH + i][1] = abs(self.IB[i])/3 + 1 l.Bonds[self.NBONH + i][2] = abs(self.JB[i])/3 + 1 l.Angles = [] for i in range(l.angles): l.Angles.append([0,0,0,0]) for i in range(self.NTHETH): l.Angles[i][0] = self.ICTH[i] l.Angles[i][1] = abs(self.ITH[i])/3 + 1 l.Angles[i][2] = abs(self.JTH[i])/3 + 1 l.Angles[i][3] = abs(self.KTH[i])/3 + 1 for i in range(self.NTHETA): l.Angles[self.NTHETH + i][0] = self.ICT[i] l.Angles[self.NTHETH + i][1] = abs(self.IT[i])/3 + 1 l.Angles[self.NTHETH + i][2] = abs(self.JT[i])/3 + 1 l.Angles[self.NTHETH + i][3] = abs(self.KT[i])/3 + 1 l.Dihedrals = [] for i in range(l.dihedrals): l.Dihedrals.append([0,0,0,0,0]) for i in range(self.NPHIH): l.Dihedrals[i][0] = self.ICPH[i] l.Dihedrals[i][1] = abs(self.IPH[i])/3 + 1 l.Dihedrals[i][2] = abs(self.JPH[i])/3 + 1 l.Dihedrals[i][3] = abs(self.KPH[i])/3 + 1 l.Dihedrals[i][4] = abs(self.LPH[i])/3 + 1 for i in range(self.NPHIA): l.Dihedrals[self.NPHIH + i][0] = self.ICP[i] l.Dihedrals[self.NPHIH + i][1] = abs(self.IP[i])/3 + 1 l.Dihedrals[self.NPHIH + i][2] = abs(self.JP[i])/3 + 1 l.Dihedrals[self.NPHIH + i][3] = abs(self.KP[i])/3 + 1 l.Dihedrals[self.NPHIH + i][4] = abs(self.LP[i])/3 + 1 print 'done.' 
return l else: print '(Error: Not all the Amber data has been read!)' #-------------------------------------------------------- def Read_data(self, Filename): 'Read the filename, returning a list of strings' import string, sys print 'Reading', Filename + '...', sys.stdout.flush() try: F = open(Filename) except IOError, Detail: print '(error:', Detail[1] + '!)' return try: Lines = F.readlines() except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return F.close() # If the first line is empty, use the Basename if Filename[-4:] == '.crd': if string.split(Lines[0]) == []: # This line corresponds to TITLE name in CRD file Basename = Filename[:string.find(Filename, '.')] Item_list = [Basename] print 'Warning: Title not present... Assigning Basename as Title' else: Item_list = [] else: if string.split(Lines[3]) == []: # This line corresponds to TITLE name in TOPOLOGY file Basename = Filename[:string.find(Filename, '.')] Item_list = [Basename] print 'Warning: Title not present... Assigning Basename as Title' else: Item_list = [] for Line in Lines: if Line[0]!='%': #Vikas' Modification: This condition ignores all the lines starting with % in the topology file. Item_list.extend(string.split(Line)) return Item_list #-------------------------------------------------------- def Read_CRD(self, Basename): 'Read the Amber coordinate/restart (.crd) file' # The optional velocities and periodic box size are not yet parsed. Item_list = self.Read_data(Basename + '.crd') if Item_list == None: return elif len(Item_list) < 2: print '(error: File too short!)' return # Parse the data if self.__dict__.has_key('ITITL'): if Pop(Item_list,0) != self.ITITL: print '(warning: ITITL differs!)', else: self.ITITL = Pop(Item_list,0) print self.ITITL #Vikas Modification : Priting the Title if self.__dict__.has_key('NATOM'): if eval(Pop(Item_list,0)) != self.NATOM: print '(error: NATOM differs!)' return else: self.NATOM = eval(Pop(Item_list,0)) print self.NATOM # Vikas' Modification: Printing number of atoms just to make sure that the program is reading the correct value. #if len(Item_list) == 1 + 3 * self.NATOM: # Vikas' Modification: I changed the condition. if (len(Item_list)%3) != 0: self.TIME = eval(Pop(Item_list,0)) else: self.TIME = 0 print self.TIME # Vikas' Modification : Printing simulation time, just to make sure that the program is readint the correct value. if len(Item_list) < 3 * self.NATOM: print '(error: File too short!)' return self.X = [] self.Y = [] self.Z = [] for i in range(self.NATOM): self.X.append(eval(Pop(Item_list,0))) self.Y.append(eval(Pop(Item_list,0))) self.Z.append(eval(Pop(Item_list,0))) if (self.NATOM == 1) and len(Item_list): print '(warning: Ambiguity!)', if len(Item_list) >= 3 * self.NATOM: self.VX = [] self.VY = [] self.VZ = [] for i in range(self.NATOM): self.VX.append(eval(Pop(Item_list,0))) self.VY.append(eval(Pop(Item_list,0))) self.VZ.append(eval(Pop(Item_list,0))) if len(Item_list) >= 3: self.BOX = [] for i in range(3): self.BOX.append(eval(Pop(Item_list,0))) if len(Item_list): print '(warning: File too large!)', print 'done.' 
self.CRD_is_read = 1 #-------------------------------------------------------- def Read_TOP(self, Basename): 'Read the Amber parameter/topology (.top) file' Item_list = self.Read_data(Basename + '.top') if Item_list == None: return elif len(Item_list) < 31: print '(error: File too short!)' return # Parse the data if self.__dict__.has_key('ITITL'): if Pop(Item_list,0) != self.ITITL: print '(warning: ITITL differs!)' else: self.ITITL = Pop(Item_list,0) print self.ITITL # Printing Self Title if self.__dict__.has_key('NATOM'): if eval(Pop(Item_list,0)) != self.NATOM: print '(error: NATOM differs!)' return else: self.NATOM = eval(Pop(Item_list,0)) print self.NATOM # Printing total number of atoms just to make sure that thing are going right self.NTYPES = eval(Pop(Item_list,0)) self.NBONH = eval(Pop(Item_list,0)) self.MBONA = eval(Pop(Item_list,0)) self.NTHETH = eval(Pop(Item_list,0)) self.MTHETA = eval(Pop(Item_list,0)) self.NPHIH = eval(Pop(Item_list,0)) self.MPHIA = eval(Pop(Item_list,0)) self.NHPARM = eval(Pop(Item_list,0)) self.NPARM = eval(Pop(Item_list,0)) self.NEXT = eval(Pop(Item_list,0)) self.NRES = eval(Pop(Item_list,0)) self.NBONA = eval(Pop(Item_list,0)) self.NTHETA = eval(Pop(Item_list,0)) self.NPHIA = eval(Pop(Item_list,0)) self.NUMBND = eval(Pop(Item_list,0)) self.NUMANG = eval(Pop(Item_list,0)) self.NPTRA = eval(Pop(Item_list,0)) self.NATYP = eval(Pop(Item_list,0)) self.NPHB = eval(Pop(Item_list,0)) self.IFPERT = eval(Pop(Item_list,0)) self.NBPER = eval(Pop(Item_list,0)) self.NGPER = eval(Pop(Item_list,0)) self.NDPER = eval(Pop(Item_list,0)) self.MBPER = eval(Pop(Item_list,0)) self.MGPER = eval(Pop(Item_list,0)) self.MDPER = eval(Pop(Item_list,0)) self.IFBOX = eval(Pop(Item_list,0)) self.NMXRS = eval(Pop(Item_list,0)) self.IFCAP = eval(Pop(Item_list,0)) #.................................................... if len(Item_list) < 5 * self.NATOM + self.NTYPES**2 + \ 2*(self.NRES + self.NUMBND + self.NUMANG) + \ 3*self.NPTRA + self.NATYP: print '(error: File too short!)' return -1 self.IGRAPH = [] Pop(Item_list,0) # A little kludge is needed here, since the IGRAPH strings are # not separated by spaces if 4 characters in length. for i in range(self.NATOM): if len(Item_list[0]) > 4: Item_list.insert(1, Item_list[0][4:]) Item_list.insert(1, Item_list[0][0:4]) del Item_list[0] self.IGRAPH.append(Pop(Item_list,0)) # Vikas' Modification : In the following section, I am printing out each quantity which is currently being read from the topology file. print 'Reading Charges...' self.CHRG = [] for i in range(self.NATOM): self.CHRG.append(eval(Pop(Item_list,0))) print 'Reading Atomic Masses...' self.AMASS = [] for i in range(self.NATOM): self.AMASS.append(eval(Pop(Item_list,0))) print 'Reading Atom Types...' self.IAC = [] for i in range(self.NATOM): self.IAC.append(eval(Pop(Item_list,0))) print 'Reading Excluded Atoms...' self.NUMEX = [] for i in range(self.NATOM): self.NUMEX.append(eval(Pop(Item_list,0))) print 'Reading Non-bonded Parameter Index...' self.ICO = [] for i in range(self.NTYPES**2): self.ICO.append(eval(Pop(Item_list,0))) print 'Reading Residue Labels...' self.LABRES = [] for i in range(self.NRES): self.LABRES.append(Pop(Item_list,0)) print 'Reading Residues Starting Pointers...' self.IPRES = [] for i in range(self.NRES): self.IPRES.append(eval(Pop(Item_list,0))) print 'Reading Bond Force Constants...' self.RK = [] for i in range(self.NUMBND): self.RK.append(eval(Pop(Item_list,0))) print 'Reading Equilibrium Bond Values...' 
self.REQ = [] for i in range(self.NUMBND): self.REQ.append(eval(Pop(Item_list,0))) print 'Reading Angle Force Constants...' self.TK = [] for i in range(self.NUMANG): self.TK.append(eval(Pop(Item_list,0))) print 'Reading Equilibrium Angle Values...' self.TEQ = [] for i in range(self.NUMANG): self.TEQ.append(eval(Pop(Item_list,0))) print 'Reading Dihedral Force Constants...' self.PK = [] for i in range(self.NPTRA): self.PK.append(eval(Pop(Item_list,0))) print 'Reading Dihedral Periodicity...' self.PN = [] for i in range(self.NPTRA): self.PN.append(eval(Pop(Item_list,0))) print 'Reading Dihedral Phase...' self.PHASE = [] for i in range(self.NPTRA): self.PHASE.append(eval(Pop(Item_list,0))) print 'Reading Solty...' #I think this is currently not used in AMBER. Check it out, though self.SOLTY = [] for i in range(self.NATYP): self.SOLTY.append(eval(Pop(Item_list,0))) #.................................................... if len(Item_list) < 2 * self.NTYPES * (self.NTYPES + 1) / 2: print '(error: File too short!)' return -1 print 'Reading LJ A Coefficient...' self.CN1 = [] for i in range(self.NTYPES * (self.NTYPES + 1) / 2): self.CN1.append(eval(Pop(Item_list,0))) print 'Reading LJ B Coefficient...' self.CN2 = [] for i in range(self.NTYPES * (self.NTYPES + 1) / 2): self.CN2.append(eval(Pop(Item_list,0))) #.................................................... if len(Item_list) < 3 * (self.NBONH + self.NBONA) + \ 4 * (self.NTHETH + self.NTHETA) + 5 * (self.NPHIH + self.NPHIA): print '(error: File too short!)' return -1 print 'Reading Bonds which include hydrogen...' self.IBH = [] self.JBH = [] self.ICBH = [] for i in range(self.NBONH): self.IBH.append(eval(Pop(Item_list,0))) self.JBH.append(eval(Pop(Item_list,0))) self.ICBH.append(eval(Pop(Item_list,0))) print 'Reading Bonds which dont include hydrogen...' self.IB = [] self.JB = [] self.ICB = [] for i in range(self.NBONA): self.IB.append(eval(Pop(Item_list,0))) self.JB.append(eval(Pop(Item_list,0))) self.ICB.append(eval(Pop(Item_list,0))) print 'Reading Angles which include hydrogen...' self.ITH = [] self.JTH = [] self.KTH = [] self.ICTH = [] for i in range(self.NTHETH): self.ITH.append(eval(Pop(Item_list,0))) self.JTH.append(eval(Pop(Item_list,0))) self.KTH.append(eval(Pop(Item_list,0))) self.ICTH.append(eval(Pop(Item_list,0))) print 'Reading Angles which dont include hydrogen...' self.IT = [] self.JT = [] self.KT = [] self.ICT = [] for i in range(self.NTHETA): self.IT.append(eval(Pop(Item_list,0))) self.JT.append(eval(Pop(Item_list,0))) self.KT.append(eval(Pop(Item_list,0))) self.ICT.append(eval(Pop(Item_list,0))) print 'Reading Dihedrals which include hydrogen...' self.IPH = [] self.JPH = [] self.KPH = [] self.LPH = [] self.ICPH = [] for i in range(self.NPHIH): self.IPH.append(eval(Pop(Item_list,0))) self.JPH.append(eval(Pop(Item_list,0))) self.KPH.append(eval(Pop(Item_list,0))) self.LPH.append(eval(Pop(Item_list,0))) self.ICPH.append(eval(Pop(Item_list,0))) print 'Reading Dihedrals which dont include hydrogen...' self.IP = [] self.JP = [] self.KP = [] self.LP = [] self.ICP = [] for i in range(self.NPHIA): self.IP.append(eval(Pop(Item_list,0))) self.JP.append(eval(Pop(Item_list,0))) self.KP.append(eval(Pop(Item_list,0))) self.LP.append(eval(Pop(Item_list,0))) self.ICP.append(eval(Pop(Item_list,0))) #.................................................... if len(Item_list) < self.NEXT + 3 * self.NPHB + 4 * self.NATOM: print '(error: File too short!)' return -1 print 'Reading Excluded Atom List...' 
self.NATEX = [] for i in range(self.NEXT): self.NATEX.append(eval(Pop(Item_list,0))) print 'Reading H-Bond A Coefficient, corresponding to r**12 term for all possible types...' self.ASOL = [] for i in range(self.NPHB): self.ASOL.append(eval(Pop(Item_list,0))) print 'Reading H-Bond B Coefficient, corresponding to r**10 term for all possible types...' self.BSOL = [] for i in range(self.NPHB): self.BSOL.append(eval(Pop(Item_list,0))) print 'Reading H-Bond Cut...' # I think it is not being used nowadays self.HBCUT = [] for i in range(self.NPHB): self.HBCUT.append(eval(Pop(Item_list,0))) print 'Reading Amber Atom Types for each atom...' self.ISYMBL = [] for i in range(self.NATOM): self.ISYMBL.append(Pop(Item_list,0)) print 'Reading Tree Chain Classification...' self.ITREE = [] for i in range(self.NATOM): self.ITREE.append(Pop(Item_list,0)) print 'Reading Join Array: Tree joining information' # Currently unused in Sander, an AMBER module self.JOIN = [] for i in range(self.NATOM): self.JOIN.append(eval(Pop(Item_list,0))) print 'Reading IRotate...' # Currently unused in Sander and Gibbs self.IROTAT = [] for i in range(self.NATOM): self.IROTAT.append(eval(Pop(Item_list,0))) #.................................................... if self.IFBOX > 0: if len(Item_list) < 3: print '(error: File too short!)' return -1 print 'Reading final residue which is part of solute...' self.IPTRES = eval(Pop(Item_list,0)) print 'Reading total number of molecules...' self.NSPM = eval(Pop(Item_list,0)) print 'Reading first solvent moleule index...' self.NSPSOL = eval(Pop(Item_list,0)) if len(Item_list) < self.NSPM + 4: print '(error: File too short!)' return -1 print 'Reading atom per molecule...' self.NSP = [] for i in range(self.NSPM): self.NSP.append(eval(Pop(Item_list,0))) self.BETA = eval(Pop(Item_list,0)) print 'Reading Box Dimensions...' if self.__dict__.has_key('BOX'): BOX = [] for i in range(3): BOX.append(eval(Pop(Item_list,0))) for i in range(3): if BOX[i] != self.BOX[i]: print '(warning: BOX differs!)', break del BOX else: self.BOX = [] for i in range(3): self.BOX.append(eval(Pop(Item_list,0))) #.................................................... if self.IFCAP > 0: if len(Item_list) < 5: print '(error: File too short!)' return -1 print 'Reading ICAP variables::: For details, refer to online AMBER format manual' self.NATCAP = eval(Pop(Item_list,0)) self.CUTCAP = eval(Pop(Item_list,0)) self.XCAP = eval(Pop(Item_list,0)) self.YCAP = eval(Pop(Item_list,0)) self.ZCAP = eval(Pop(Item_list,0)) #.................................................... if self.IFPERT > 0: if len(Item_list) < 4 * self.NBPER + 5 * self.NGPER + \ 6 * self.NDPER + self.NRES + 6 * self.NATOM: print '(error: File too short!)' return -1 print 'Reading perturb variables, 1. Bond, 2. Angles, 3. 
Dihedrals, etc etc.::: For details, refer to online AMBER format manual' self.IBPER = [] self.JBPER = [] for i in range(self.NBPER): self.IBPER.append(eval(Pop(Item_list,0))) self.JBPER.append(eval(Pop(Item_list,0))) self.ICBPER = [] for i in range(2 * self.NBPER): self.ICBPER.append(eval(Pop(Item_list,0))) self.ITPER = [] self.JTPER = [] self.KTPER = [] for i in range(self.NGPER): self.ITPER.append(eval(Pop(Item_list,0))) self.JTPER.append(eval(Pop(Item_list,0))) self.KTPER.append(eval(Pop(Item_list,0))) self.ICTPER = [] for i in range(2 * self.NGPER): self.ICTPER.append(eval(Pop(Item_list,0))) self.IPPER = [] self.JPPER = [] self.KPPER = [] self.LPPER = [] for i in range(self.NDPER): self.IPPER.append(eval(Pop(Item_list,0))) self.JPPER.append(eval(Pop(Item_list,0))) self.KPPER.append(eval(Pop(Item_list,0))) self.LPPER.append(eval(Pop(Item_list,0))) self.ICPPER = [] for i in range(2 * self.NDPER): self.ICPPER.append(eval(Pop(Item_list,0))) LABRES = [] for i in range(self.NRES): LABRES.append(Pop(Item_list,0)) for i in range(self.NRES): if LABRES[i] != self.LABRES[i]: print '(warning: BOX differs!)', break self.IGRPER = [] for i in range(self.NATOM): self.IGRPER.append(eval(Pop(Item_list,0))) self.ISMPER = [] for i in range(self.NATOM): self.ISMPER.append(eval(Pop(Item_list,0))) self.ALMPER = [] for i in range(self.NATOM): self.ALMPER.append(eval(Pop(Item_list,0))) self.IAPER = [] for i in range(self.NATOM): self.IAPER.append(eval(Pop(Item_list,0))) self.IACPER = [] for i in range(self.NATOM): self.IACPER.append(eval(Pop(Item_list,0))) self.CGPER = [] for i in range(self.NATOM): self.CGPER.append(eval(Pop(Item_list,0))) #.................................................... self.IPOL = 0 if self.IPOL == 1: if len(Item_list) < self.NATOM: print '(error: File too short!)' return -1 print 'Reading Polarizability Data. For details, refer to online AMBER format manual' self.ATPOL = [] for i in range(self.NATOM): self.ATPOL.append(eval(Pop(Item_list,0))) if self.IFPERT == 1: if len(Item_list) < self.NATOM: print '(error: File too short!)' return -1 self.ATPOL1 = [] for i in range(self.NATOM): self.ATPOL1.append(eval(Pop(Item_list,0))) #.................................................... if len(Item_list): print '(warning: File too large!)', print 'done.' self.TOP_is_read = 1 #============================================================ def Find_Amber_files(): 'Look for sets of Amber files to process' '''If not passed anything on the command line, look for pairs of Amber files (.crd and .top) in the current directory. For each set if there is no corresponding Lammps file (data.), or it is older than any of the Amber files, add its basename to a list of strings. 
This list is returned by the function''' # Date and existence checks not yet implemented import os, sys Basename_list = [] # Extract basenames from command line for Name in sys.argv[1:]: if Name[-4:] == '.crd': Basename_list.append(Name[:-4]) else: if Name[-4:] == '.top': Basename_list.append(Name[:-4]) else: Basename_list.append(Name) # Remove duplicate basenames for Basename in Basename_list[:]: while Basename_list.count(Basename) > 1: Basename_list.remove(Basename) if Basename_list == []: print 'Looking for Amber files...', Dir_list = os.listdir('.') Dir_list.sort() for File in Dir_list: if File[-4:] == '.top': Basename = File[:-4] if (Basename + '.crd') in Dir_list: Basename_list.append(Basename) if Basename_list != []: print 'found', for i in range(len(Basename_list)-1): print Basename_list[i] + ',', print Basename_list[-1] + '\n' if Basename_list == []: print 'none.\n' return Basename_list #============================================================ def Convert_Amber_files(): 'Handle the whole conversion process' print print 'Welcome to amber2lammps, a program to convert Amber files to Lammps format!' print Basename_list = Find_Amber_files() for Basename in Basename_list: a = Amber() a.Read_CRD(Basename) if a.CRD_is_read: a.Read_TOP(Basename) if a.TOP_is_read: l = a.Coerce_to_Lammps() l.Write_Lammps(Basename) del l del a print #============================================================ Convert_Amber_files()
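
#============================================================
# Usage note (a hedged addition, not in the original script):
# Convert_Amber_files() runs as soon as this file is executed, so typical use
# is simply:
#
#   python2 amber2lammps.py            # convert every <base>.crd/<base>.top pair in .
#   python2 amber2lammps.py mol.crd    # convert only the basename "mol"
#
# Each conversion writes a "data.<basename>" LAMMPS data file (a numbered
# variant if one already exists). Note the script is Python 2 only: it relies
# on print statements and backtick repr syntax.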
browndeer/lammps-ocl
tools/amber2lmp/amber2lammps.py
Python
gpl-2.0
35,052
[ "Amber", "LAMMPS" ]
0089a90d2e4f6eae3247f5681d652156d977f70e1eedd59130a9745d6a099059
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import time
import os
import numpy as np
import scipy
import scipy.misc
from skopt import gp_minimize
from skopt.space import Categorical


# Expansion names indexed by expansion number; this table replaces the original
# pair of 22-branch if/elif ladders (behavior unchanged).
_EXPANSION_STRINGS = ("u08", "qt", "ct",
                      "ch3s10qt", "ch3s15qt", "ch3s20qt", "ch3s25qt",
                      "ch2s10qt", "ch2s15qt", "ch2s20qt", "ch2s25qt",
                      "ch2s30qt", "ch2s35qt", "ch2s40qt", "ch2s45qt",
                      "ch2s50qt", "ch2s55qt", "ch2s60qt", "ch2s65qt",
                      "ch2s70qt", "ch2s75qt", "ch2s80qt")


def expansion_number_to_string(expansion):
    if 0 <= expansion < len(_EXPANSION_STRINGS):
        return _EXPANSION_STRINGS[expansion]
    raise Exception("invalid expansion number: " + str(expansion))


def string_to_expansion_number(string):
    for number, name in enumerate(_EXPANSION_STRINGS):
        if string == name + "Exp":
            return number
    raise Exception("invalid expansion string: " + string)


def cuicuilco_f_CE_Gauss(arguments):
    return 1.0 - cuicuilco_evaluation(arguments, measure="CR_Gauss")


def cuicuilco_f_CE_Gauss_soft(arguments):
    return 1.0 - cuicuilco_evaluation(arguments, measure="CR_Gauss_soft")


def cuicuilco_f_CE_Gauss_mix(arguments):
    return 1.0 - cuicuilco_evaluation(arguments, measure="CR_Gauss_mix")


def cuicuilco_evaluation(arguments, measure="CR_Gauss", verbose=False):
    (L0_pca_out_dim, L0_sfa_out_dim, L1H_sfa_out_dim, L1V_sfa_out_dim,
     L2H_sfa_out_dim, L2V_sfa_out_dim, L3H_sfa_out_dim, L3V_sfa_out_dim,
     L0_delta_threshold, L1H_delta_threshold, L1V_delta_threshold,
     L2H_delta_threshold, L2V_delta_threshold, L3H_delta_threshold,
     L3V_delta_threshold, L0_expansion, L1H_expansion, L1V_expansion,
     L2H_expansion, L2V_expansion, L3H_expansion, L3V_expansion,
     L4_degree_QT, L4_degree_CT) = arguments
    print("invoking cuicuilco_evaluation with arguments:", arguments)

    # Testing whether arguments are compatible
    incompatible = 0
    if L0_pca_out_dim + L0_delta_threshold < L0_sfa_out_dim:
        L0_delta_threshold = L0_sfa_out_dim - L0_pca_out_dim
        print("Attempting to solve incompatibility case 1", L0_pca_out_dim, L0_delta_threshold, L0_sfa_out_dim)
    if L0_delta_threshold < 1 or L0_delta_threshold > 20:
        incompatible = 21
    if 2 * L2H_sfa_out_dim + L2V_delta_threshold < L2V_sfa_out_dim:
        # Bug fix: the original line computed this difference without assigning
        # it, so the adjustment never took effect; mirror the case-1 repair.
        L2V_delta_threshold = L2V_sfa_out_dim - 2 * L2H_sfa_out_dim
        print("Attempting to solve incompatibility case 2", L2H_sfa_out_dim, L2V_delta_threshold, L2V_sfa_out_dim)
    if L2V_delta_threshold < 1 or

    if L0_pca_out_dim + L0_delta_threshold < L0_sfa_out_dim:
        incompatible = 1
    elif 2 * L0_sfa_out_dim + L1H_delta_threshold < L1H_sfa_out_dim:
        # This factor is 2 and not 3 due to overlap
        incompatible = 2
    elif 2 * L1H_sfa_out_dim + L1V_delta_threshold < L1V_sfa_out_dim:
        # This factor is 2 and not 3 due to overlap
        incompatible = 3
    elif 2 * L1V_sfa_out_dim + L2H_delta_threshold < L2H_sfa_out_dim:
        incompatible = 4
    elif 2 * L2H_sfa_out_dim + L2V_delta_threshold < L2V_sfa_out_dim:
        incompatible = 5
    elif 2 * L2V_sfa_out_dim + L3H_delta_threshold < L3H_sfa_out_dim:
        incompatible = 6
    elif 2 * L3H_sfa_out_dim + L3V_delta_threshold < L3V_sfa_out_dim:
        incompatible = 7

    if L1H_delta_threshold > (2 + 3) * L0_sfa_out_dim:
        incompatible = 8
    elif L1V_delta_threshold > (2 + 3) * L1H_sfa_out_dim:
        incompatible = 9
    elif L2H_delta_threshold > 2 * L1V_sfa_out_dim:  # the factor here should be actually 4, right?
        incompatible = 10
    elif L2V_delta_threshold > 2 * L2H_sfa_out_dim:
        incompatible = 11
    elif L3H_delta_threshold > 2 * L2V_sfa_out_dim:
        incompatible = 12
    elif L3V_delta_threshold > 2 * L3H_sfa_out_dim:
        incompatible = 13

    if L0_delta_threshold > L0_sfa_out_dim:
        incompatible = 14
    elif L1H_delta_threshold > L1H_sfa_out_dim:
        incompatible = 15
    elif L1V_delta_threshold > L1V_sfa_out_dim:
        incompatible = 16
    elif L2H_delta_threshold > L2H_sfa_out_dim:
        incompatible = 17
    elif L2V_delta_threshold > L2V_sfa_out_dim:
        incompatible = 18
    elif L3H_delta_threshold > L3H_sfa_out_dim:
        incompatible = 19
    elif L3V_delta_threshold > L3V_sfa_out_dim:
        incompatible = 20

    if incompatible:
        print("Configuration (before fixes):", arguments,
              " is incompatible (%d) and was skipped" % incompatible)
        return 0.0

    # Update arguments variable
    arguments = (L0_pca_out_dim, L0_sfa_out_dim, L1H_sfa_out_dim, L1V_sfa_out_dim,
                 L2H_sfa_out_dim, L2V_sfa_out_dim, L3H_sfa_out_dim, L3V_sfa_out_dim,
                 L0_delta_threshold, L1H_delta_threshold, L1V_delta_threshold,
                 L2H_delta_threshold, L2V_delta_threshold, L3H_delta_threshold,
                 L3V_delta_threshold, L0_expansion, L1H_expansion, L1V_expansion,
                 L2H_expansion, L2V_expansion, L3H_expansion, L3V_expansion,
                 L4_degree_QT, L4_degree_CT)

    print("Creating configuration file ")
    fd = open("MNISTNetwork_24x24_7L_Overlap_config.txt", "w")
    txt = ""
    for entry in arguments:
        txt += str(entry) + " "
    fd.write(txt)
    fd.close()
    print("created configuration file with contents:", txt)

    cuicuilco_experiment_seeds = [112210, 112220, 112230]  # , 112240] # [112244, 112255, 112266, 112277] # , 112277]
    metrics = []
    for cuicuilco_experiment_seed in cuicuilco_experiment_seeds:  # 112233 # np.random.randint(2**25) # np.random.randn()
        os.putenv("CUICUILCO_EXPERIMENT_SEED", str(cuicuilco_experiment_seed))
        print("Setting CUICUILCO_EXPERIMENT_SEED: ", str(cuicuilco_experiment_seed))

        output_filename = "hyper_t/MNIST_24x24_7L_L0cloneL_%dPC_%dSF_%sExp_%dF_" + \
                          "L1cloneL_%dSF_%sExp_%dF_L2clone_%dSF_%sExp_%dF_L3cloneL_%dSF_%sExp_%dF_" + \
                          "L4cloneL_%dSF_%sExp_%dF_L5_%dSF_%sExp_%dF_L6_%dSF_%sExp_%dF_NoHead_QT%dAP_CT%dAP_seed%d.txt"
        output_filename = output_filename % (
            L0_pca_out_dim, L0_delta_threshold, expansion_number_to_string(L0_expansion), L0_sfa_out_dim,
            L1H_delta_threshold, expansion_number_to_string(L1H_expansion), L1H_sfa_out_dim,
            L1V_delta_threshold, expansion_number_to_string(L1V_expansion), L1V_sfa_out_dim,
            L2H_delta_threshold, expansion_number_to_string(L2H_expansion), L2H_sfa_out_dim,
            L2V_delta_threshold, expansion_number_to_string(L2V_expansion), L2V_sfa_out_dim,
            L3H_delta_threshold, expansion_number_to_string(L3H_expansion), L3H_sfa_out_dim,
            L3V_delta_threshold, expansion_number_to_string(L3V_expansion), L3V_sfa_out_dim,
            L4_degree_QT, L4_degree_CT, cuicuilco_experiment_seed)
        if os.path.isfile(output_filename):
            print("file %s already exists, skipping its computation" % output_filename)
        else:
            command = "time nice -n 19 python -u -m cuicuilco.cuicuilco_run --EnableDisplay=0 --CacheAvailable=0 " + \
                      "--NetworkCacheReadDir=/local/tmp/escalafl/Alberto/SavedNetworks " + \
                      "--NetworkCacheWriteDir=/local/tmp/escalafl/Alberto/SavedNetworks " + \
                      "--NodeCacheReadDir=/local/tmp/escalafl/Alberto/SavedNodes " + \
                      "--NodeCacheWriteDir=/local/tmp/escalafl/Alberto/SavedNodes " + \
                      "--ClassifierCacheWriteDir=/local/tmp/escalafl/Alberto/SavedClassifiers " + \
                      "--SaveSubimagesTraining=0 --SaveAverageSubimageTraining=0 --NumFeaturesSup=9 " + \
                      "--SaveSorted_AE_GaussNewid=0 --SaveSortedIncorrectClassGaussNewid=0 " + \
                      "--ComputeSlowFeaturesNewidAcrossNet=0 --UseFilter=0 --EnableGC=1 --SFAGCReducedDim=0 --EnableKNN=0 " + \
                      "--kNN_k=3 --EnableNCC=0 --EnableSVM=0 --SVM_C=0.125 --SVM_gamma=1.0 --EnableLR=0 " + \
                      "--AskNetworkLoading=0 --LoadNetworkNumber=-1 --NParallel=2 --EnableScheduler=0 " + \
                      "--EstimateExplainedVarWithInverse=0 --EstimateExplainedVarWithKNN_k=0 " + \
                      "--EstimateExplainedVarWithKNNLinApp=0 --EstimateExplainedVarLinGlobal_N=0 --AddNormalizationNode=0 " + \
                      "--MakeLastPCANodeWhithening=0 --FeatureCutOffLevel=-1.0 --ExportDataToLibsvm=0 " + \
                      "--IntegerLabelEstimation=0 --MapDaysToYears=0 --CumulativeScores=0 --DatasetForDisplayNewid=0 " + \
                      "--GraphExactLabelLearning=0 --OutputInsteadOfSVM2=0 --NumberTargetLabels=0 --EnableSVR=0 " + \
                      "--SVR_gamma=0.85 --SVR_C=48.0 --SVR_epsilon=0.075 --SVRInsteadOfSVM2=1 --ObjectiveLabel=0 " + \
                      "--ExperimentalDataset=ParamsMNISTFunc --HierarchicalNetwork=MNISTNetwork_24x24_7L_Overlap_dd2_config " + \
                      "--SleepM=0 2>&1 > " + output_filename
            print("executing command: ", command)
            os.system(command)

        if verbose:
            print("extracting performance metric from resulting file")
        metric = extract_performance_metric_from_file(output_filename, measure=measure)
        metrics.append(metric)
    return np.array(metrics).mean()


def extract_performance_metric_from_file(output_filename, measure="CR_Gauss", verbose=False):
    command_extract = "cat %s | grep New | grep CR_G > del_tmp.txt" % output_filename
    os.system(command_extract)
    fd = open("del_tmp.txt", "r")
    metrics = fd.readline().split(" ")
    fd.close()
    if verbose:
        print("metrics: ", metrics)
    if len(metrics) > 10 and metrics[6] == "CR_Gauss":
        metric_CR_Gauss = float(metrics[7].strip(","))
        metric_CR_Gauss_soft = float(metrics[9].strip(","))
        if np.isnan(metric_CR_Gauss_soft):
            print("warning, nan metric was found and fixed as metric_CR_Gauss - 0.0001")
            metric_CR_Gauss_soft = metric_CR_Gauss - 0.0001
    else:
        print("unable to find metrics in file (defaulting to 0.95)")
        metric_CR_Gauss = 0.95
        metric_CR_Gauss_soft = 0.95
    if measure == "CR_Gauss":
        metric = metric_CR_Gauss
    elif measure == "CR_Gauss_soft":
        metric = metric_CR_Gauss_soft
    elif measure == "CR_Gauss_mix":
        metric = 0.5 * (metric_CR_Gauss + metric_CR_Gauss_soft)
    else:
        er = "invalid measure: " + str(measure)
        raise Exception(er)
    # print("metric_CR_Gauss: ", metric_CR_Gauss, " metric_CR_Gauss_soft:", metric_CR_Gauss_soft)
    return metric
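

# A minimal illustrative sketch (an addition for documentation, never called by
# the search itself): the same "CR_Gauss" line parsing as above, but on an
# in-memory string instead of the cat/grep pipeline. The function name is an
# assumption.
def parse_cr_gauss_line_example(line):
    """Return (CR_Gauss, CR_Gauss_soft) parsed from one log line, or None."""
    parts = line.split(" ")
    if len(parts) > 10 and parts[6] == "CR_Gauss":
        return float(parts[7].strip(",")), float(parts[9].strip(","))
    return None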
f.startswith("MNIST_24x24_7L")] arguments_list = [] results_list = [] for f in only_files: # print("filename %s was found" % f) # MNIST_24x24_7L_L0cloneL_16PC_1SF_qtExp_25F_L1cloneL_1SF_u08Exp_20F_L2clone_30SF_u08Exp_80F_L3cloneL_1SF_u08Exp_100F_L4cloneL_20F_u08Exp_120F_L5_20F_u08Exp_90SF_L6_20F_u08Exp_250SF_NoHead_QT90AP_CT25AP_seed13153651.txt vals = f.split("_") vals = [val.strip("PCFSseedQTA.txt") for val in vals] if verbose: print("vals=", vals) # quit() if len(vals) >= 36: L0_pca_out_dim = int(vals[4]) L0_sfa_out_dim = int(vals[7]) L1H_sfa_out_dim = int(vals[11]) L1V_sfa_out_dim = int(vals[15]) L2H_sfa_out_dim = int(vals[19]) L2V_sfa_out_dim = int(vals[23]) L3H_sfa_out_dim = int(vals[27]) L3V_sfa_out_dim = int(vals[31]) L0_delta_threshold = int(vals[5]) L1H_delta_threshold = int(vals[9]) L1V_delta_threshold = int(vals[13]) L2H_delta_threshold = int(vals[17]) L2V_delta_threshold = int(vals[21]) L3H_delta_threshold = int(vals[25]) L3V_delta_threshold = int(vals[29]) L0_expansion = string_to_expansion_number(vals[6]) L1H_expansion = string_to_expansion_number(vals[10]) L1V_expansion = string_to_expansion_number(vals[14]) L2H_expansion = string_to_expansion_number(vals[18]) L2V_expansion = string_to_expansion_number(vals[22]) L3H_expansion = string_to_expansion_number(vals[26]) L3V_expansion = string_to_expansion_number(vals[30]) L4_degree_QT = int(vals[33]) L4_degree_CT = int(vals[34]) seed = int(vals[35]) arguments = [L0_pca_out_dim, L0_sfa_out_dim, L1H_sfa_out_dim, L1V_sfa_out_dim, L2H_sfa_out_dim, L2V_sfa_out_dim, L3H_sfa_out_dim, L3V_sfa_out_dim, L0_delta_threshold, L1H_delta_threshold, L1V_delta_threshold, L2H_delta_threshold, L2V_delta_threshold, L3H_delta_threshold, L3V_delta_threshold, L0_expansion, L1H_expansion, L1V_expansion, L2H_expansion, L2V_expansion, L3H_expansion, L3V_expansion, L4_degree_QT, L4_degree_CT] if verbose: print("parsed arguments:", arguments) metric = extract_performance_metric_from_file(os.path.join(path, f), measure) arguments_list.append(arguments) results_list.append(metric) else: print("Error parging values", vals) if len(arguments_list) > 0: results_list = np.array(results_list) # arguments_list = np.array(arguments_list, dtype=int) ordering = np.argsort(results_list)[::-1] results_list = results_list[ordering] sorted_arguments_list = [] for i in range(len(ordering)): sorted_arguments_list.append(arguments_list[ordering[i]]) arguments_list = sorted_arguments_list # print("ordered results_list: ", results_list) # print("ordered arguments_list: ") for arguments in arguments_list: print(arguments) if dimensions is not None: validity_values = [] for i, arguments in enumerate(arguments_list): valid = True for j, dim in enumerate(dimensions): arg_value = arguments[j] if isinstance(dim, Categorical): if arguments[j] not in dim.categories: valid = False if verbose: print("entry %d failed validity for argument %d with value %d" % (i, j, arg_value), dim.categories) elif isinstance(dim, tuple) and len(dim) == 2: if dim[0] > arguments[j] or dim[1] < arguments[j]: valid = False if verbose: print("entry %d failed validity for argument %d with value %d" % (i, j, arg_value), dim) elif isinstance(dim, list): if arguments[j] not in dim: valid = False if verbose: print("entry %d failed validity for argument %d with value %d" % (i, j, arg_value), dim) validity_values.append(valid) print("validity_values:", validity_values) filtered_arguments_list = [] for i in range(len(validity_values)): if validity_values[i]: filtered_arguments_list.append(arguments_list[i]) 
            arguments_list = filtered_arguments_list
            results_list = results_list[validity_values]
            # if len(arguments_list) == 0:
            #     arguments_list = None
            #     results_list = None
        # print("final ordered results_list: ", results_list)
        # print("final ordered arguments_list: ")
        for arguments in arguments_list:
            print(arguments)
        # quit()
    if len(arguments_list) == 0:
        arguments_list = None
        results_list = None
    return arguments_list, results_list


def display_best_arguments(arguments_list, results_list, consider_std=True):
    if arguments_list is None:
        print("arguments_list is None")
        return None, None
    arguments_results_dict = {}
    for i, arguments in enumerate(arguments_list):
        arg_tuple = tuple(arguments)
        if arg_tuple in arguments_results_dict:
            arguments_results_dict[arg_tuple].append(results_list[i])
        else:
            arguments_results_dict[arg_tuple] = [results_list[i]]
    # Average all entries with the same key
    averaged_arguments_list = []
    averaged_results_list = []
    results_stds = []
    results_lens = []
    for arg in arguments_results_dict.keys():
        averaged_arguments_list.append(arg)
        averaged_results_list.append(np.array(arguments_results_dict[arg]).mean())
        results_stds.append(np.array(arguments_results_dict[arg]).std())
        results_lens.append(len(arguments_results_dict[arg]))
        # print("std: ", np.array(arguments_results_dict[arg]).std(), " len:", len(arguments_results_dict[arg]))
    # print("averaged_arguments_list=", averaged_arguments_list)
    # print("averaged_results_list=", averaged_results_list)

    # sort
    averaged_results_list = np.array(averaged_results_list)
    results_stds = np.array(results_stds)
    results_lens = np.array(results_lens)
    if consider_std:
        ordering = np.argsort(averaged_results_list - 0.5 * results_stds / (results_lens - 1) ** 0.5)[::-1]
    else:
        ordering = np.argsort(averaged_results_list)[::-1]
    averaged_results_list = averaged_results_list[ordering]
    results_stds = results_stds[ordering]
    results_lens = results_lens[ordering]
    averaged_sorted_arguments_list = []
    for i in range(len(ordering)):
        averaged_sorted_arguments_list.append(averaged_arguments_list[ordering[i]])
    averaged_arguments_list = averaged_sorted_arguments_list
    print("averaged ordered results_list: ", averaged_results_list)
    print("results_stds: ", results_stds)
    corrected_results_list = averaged_results_list - 0.5 * results_stds / (results_lens - 1) ** 0.5
    print("averaged ordered results_list - 0.5 * results_stds/factor: ", corrected_results_list)
    print("results_lens: ", results_lens)
    print("averaged ordered arguments_list: ")
    for arguments in averaged_arguments_list:
        print("(", end="")
        for arg in arguments:
            print("%3d, " % arg, end="")
        print(")")
    if consider_std:
        final_results_list = corrected_results_list
    else:
        final_results_list = averaged_results_list
    return averaged_arguments_list, final_results_list


def progress_callback(res):
    print("C", end="")


# def gp_minimize(func, dimensions, base_estimator=None, n_calls=100, n_random_starts=10, acq_func='gp_hedge',
#                 acq_optimizer='auto', x0=None, y0=None, random_state=None, verbose=False, callback=None,
#                 n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96, noise='gaussian', n_jobs=1)

# ['13', '17', '33', '57', '85', '90', '87', '170', '15', '10', '19', '23', '14', '8', '4', '1', '0', '0', '0', '0', '0', '13', '79', '20']
# [13, 17, 33, 57, 85, 90, 87, 170, 15, 10, 19, 23, 14, 8, 4, 1, 0, 0, 0, 0, 0, 13, 79, 20]
# 13 20 28 50 70 90 120 200 9 19 10 26 6 6 9 0 0 0 0 0 0 0 90 25

# Output dimensionalities (PCA and iGSFA)
range_L0_pca_out_dim = (12, 13)  # O [13] # (12, 14) # (10, 16) # 13
range_L0_sfa_out_dim = (16, 21)  # N (15, 23) # O (18, 23) # (15, 25) # [20] # (20, 21)
range_L1H_sfa_out_dim = (32, 38)  # E (32, 35) # O (33, 38) # (31, 34) # (20, 36) # [28] # (28, 29)
range_L1V_sfa_out_dim = (54, 65)  # N (50, 65) # (50, 63) # [50] # (50, 51)
range_L2H_sfa_out_dim = (65, 77)  # N (65, 95) # E (65, 75) # O (68, 78) # [70] # (70, 71)
range_L2V_sfa_out_dim = (89, 96)  # N (72, 100) # E (75, 100) # O (68, 95) # [90] # (90, 91)
range_L3H_sfa_out_dim = (111, 150)  # N (92, 145) # E (125, 145) # O (100, 145) # [120] # (120, 121)
range_L3V_sfa_out_dim = (139, 230)  # E (170, 216) # O (170, 230) # [200] # (200, 201)

# Length of slow part
range_L0_delta_threshold = (10, 18)  # O (12, 18) # (1, 20) # [9] # (9, 10)
range_L1H_delta_threshold = (7, 16)  # N (7, 18) # E (10, 20) # O (7, 14) # [19] # (19, 20)
range_L1V_delta_threshold = (4, 18)  # E (7, 18) # O (7, 15) # [10] # (10, 11)
range_L2H_delta_threshold = (33, 50)  # N (15, 46) # O (23, 45) # [26] # (26, 27)
range_L2V_delta_threshold = (0, 22)  # O (0, 7) # [6] # (6, 7)
range_L3H_delta_threshold = (0, 14)  # O [0] # [6] # (6, 7)
range_L3V_delta_threshold = (9, 13)  # O [9] # (3, 5) # [9] # (9, 10)

# WARNING: two categories cannot be expressed as [n1, n2]; use e.g. Categorical([n1, n2])
# instead, otherwise the interval (n1, n2) is assumed
# Expansions
range_L0_expansion = [1]  # N (0, 1) # O [1] # [0] # (0, 1)
range_L1H_expansion = [0]  # N Categorical([0, 3]) # O [0] # TRY ALSO 3 [0, 0, 3] # (0, 1)
range_L1V_expansion = Categorical([0, 3])  # O [3] # (0, 1)
range_L2H_expansion = [4]  # N Categorical([0, 3, 4]) # E (3, 4) # O [0] # (0, 1)
range_L2V_expansion = Categorical([0, 3, 4])  # E (3, 4) # O [0] # Categorical([0, 3]) # WARNING ### [0, 3] # (0, 1)
range_L3H_expansion = (6, 16)  # N (0, 15) # E (6, 15) # O [7] # [0, 7, 8, 9, 10] # (0, 0)
range_L3V_expansion = (17, 21)  # N (0, 21) # E (15, 20) # O (11, 21) # [0, 7, 8, 9] (0, 0)
range_L4_degree_QT = (40, 109)  # O (40, 119) # [90] # (90, 90)
range_L4_degree_CT = (13, 26)  # O (10, 26) # [25] # (25, 25)

cuicuilco_dimensions = (range_L0_pca_out_dim, range_L0_sfa_out_dim, range_L1H_sfa_out_dim,
                        range_L1V_sfa_out_dim, range_L2H_sfa_out_dim, range_L2V_sfa_out_dim,
                        range_L3H_sfa_out_dim, range_L3V_sfa_out_dim, range_L0_delta_threshold,
                        range_L1H_delta_threshold, range_L1V_delta_threshold, range_L2H_delta_threshold,
                        range_L2V_delta_threshold, range_L3H_delta_threshold, range_L3V_delta_threshold,
                        range_L0_expansion, range_L1H_expansion, range_L1V_expansion, range_L2H_expansion,
                        range_L2V_expansion, range_L3H_expansion, range_L3V_expansion,
                        range_L4_degree_QT, range_L4_degree_CT)  # tuple or list?
print("cuicuilco_dimensions:", cuicuilco_dimensions) # ( 13, 20, 36, 61, 75, 95, 140, 210, 16, 12, 10, 40, 5, 0, 9, 1, 0, 3, 0, 0, 7, 20, 109, 15, ) #( 13, 19, 33, 51, 73, 90, 114, 188, 16, 11, 15, 29, 3, 0, 9, 1, 0, 3, 0, 0, 7, 19, 42, 24, ) #( 13, 20, 36, 60, 72, 89, 139, 170, 14, 7, 10, 40, 5, 0, 9, 1, 0, 3, 0, 0, 7, 19, 101, 19, ) #( 13, 19, 35, 54, 71, 91, 111, 196, 14, 11, 14, 36, 3, 0, 9, 1, 0, 3, 0, 0, 7, 17, 80, 21, ) #( 13, 19, 34, 53, 72, 89, 130, 200, 14, 12, 13, 36, 1, 0, 9, 1, 0, 3, 0, 0, 7, 17, 83, 24, ) # np.random.seed(1234) # use a new random seed each time to allow combination of executions on different systems argument_list, results_list = load_saved_executions(measure="CR_Gauss_mix", dimensions=cuicuilco_dimensions, verbose=False) display_best_arguments(argument_list, results_list) quit() #argument_list = None #results_list = None #argument_list = [ # Best hyperparameters for original slow feature scaling method #[13, 22, 38, 56, 77, 77, 124, 230, 17, 9, 14, 33, 6, 0, 9, 1, 0, 3, 0, 0, 7, 18, 91, 19], #[13, 21, 37, 55, 78, 95, 108, 170, 18, 7, 15, 45, 2, 0, 9, 1, 0, 3, 0, 0, 7, 21, 40, 26], #[13, 19, 35, 54, 71, 91, 111, 196, 14, 11, 14, 36, 3, 0, 9, 1, 0, 3, 0, 0, 7, 17, 80, 21], #[13, 17, 33, 65, 95, 72, 92, 139, 15, 13, 13, 24, 4, 0, 3, 2, 0, 3, 0, 0, 7, 9, 89, 24], #[13, 17, 34, 54, 95, 76, 100, 144, 13, 18, 4, 30, 4, 0, 1, 1, 0, 0, 0, 0, 0, 0, 98, 24], #[13, 22, 38, 56, 77, 77, 124, 230, 17, 9, 14, 33, 6, 0, 9, 1, 0, 3, 0, 0, 7, 18, 91, 19] #] #[12, 15, 35, 65, 70, 95, 140, 196, 10, 10, 12, 29, 16, 4, 11, 1, 3, 0, 4, 4, 15, 20, 109, 18], #[12, 23, 35, 64, 67, 98, 127, 184, 12, 14, 18, 29, 1, 2, 9, 0, 3, 0, 4, 4, 9, 18, 109, 20], #[15, 19, 34, 59, 74, 95, 131, 208, 14, 12, 14, 39, 1, 10, 10, 0, 3, 0, 3, 3, 14, 13, 57, 18], #[15, 20, 40, 58, 80, 76, 134, 201, 14, 12, 13, 47, 10, 10, 9, 1, 0, 3, 3, 3, 13, 15, 90, 13], #[14, 19, 35, 58, 81, 91, 127, 203, 11, 15, 17, 42, 7, 10, 9, 0, 3, 3, 0, 4, 7, 17, 100, 22], #[14, 23, 37, 58, 69, 100, 141, 222, 12, 16, 18, 16, 6, 3, 9, 1, 3, 3, 3, 3, 12, 19, 59, 21], #[14, 23, 34, 64, 68, 73, 118, 216, 16, 11, 16, 25, 9, 9, 11, 0, 0, 0, 3, 0, 14, 20, 73, 23], #[12, 20, 40, 60, 65, 68, 118, 170, 18, 18, 7, 15, 20, 12, 12, 0, 3, 3, 3, 3, 15, 20, 82, 23], #[12, 24, 35, 58, 76, 84, 131, 203, 12, 15, 13, 43, 20, 4, 12, 1, 3, 3, 3, 0, 11, 19, 107, 10], #[16, 15, 36, 54, 82, 88, 145, 218, 12, 10, 12, 37, 20, 3, 12, 0, 0, 3, 4, 4, 14, 19, 97, 10]] # 13, 18, 34, 55, 75, 73, 102, 169, 16, 10, 10, 29, 2, 0, 9, 1, 0, 3, 0, 0, 7, 12, 89, 24]] #argument_list += [[13, 17, 34, 61, 88, 94, 84, 139, 14, 11, 17, 23, 5, 7, 4, 0, 0, 0, 0, 3, 7, 14, 54, 24], # [13, 16, 33, 60, 82, 82, 99, 162, 15, 10, 18, 26, 10, 1, 0, 0, 0, 3, 0, 3, 9, 14, 36, 3], # [13, 17, 31, 56, 87, 81, 88, 171, 13, 14, 13, 28, 3, 7, 0, 0, 0, 3, 0, 0, 10, 14, 66, 21], # [13, 15, 32, 58, 79, 75, 86, 142, 13, 10, 16, 28, 9, 2, 0, 0, 0, 0, 0, 3, 9, 14, 12, 11]] #quit() if results_list is not None: results_list = [1.0 - result for result in results_list] print("cuicuilco_dimensions:", cuicuilco_dimensions) t0 = time.time() res = gp_minimize(func=cuicuilco_f_CE_Gauss_mix, dimensions=cuicuilco_dimensions, base_estimator=None, n_calls=50, n_random_starts=0, # 20 10 acq_func='gp_hedge', acq_optimizer='auto', x0=argument_list, y0=results_list, random_state=None, verbose=False, callback=progress_callback, n_points=1*10000, n_restarts_optimizer=5, # n_points=10000 xi=0.01, kappa=1.96, noise='gaussian', n_jobs=1) t1 = time.time() print("res:", res) print("Execution time: %0.3f s" % (t1 - t0))
AlbertoEsc/cuicuilco
hyperparameter_search_dd2.py
Python
apache-2.0
28,018
[ "Gaussian" ]
920a2072cd45dab429ba75117ff0e03e553822c25cb2ff50e167968127bacadf
#!/usr/bin/env python
"""
Test specific to JobParameters, with and without the flag for using the ES backend:
/Operations/[]/Services/JobMonitoring/useESForJobParametersFlag
"""
import os
import time

import DIRAC

DIRAC.initialize()  # Initialize configuration

from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient  # sut
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
from DIRAC.tests.Utilities.WMS import helloWorldJob, createFile

jobMonitoringClient = JobMonitoringClient()
jobStateUpdateClient = JobStateUpdateClient()


def createJob():
    job = helloWorldJob()
    jobDescription = createFile(job)

    wmsClient = WMSClient()
    res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
    assert res["OK"], res["Message"]
    jobID = int(res["Value"])
    return jobID


def updateFlag():
    # Here we set the flag inside /Operations/Defaults:
    # Operations/Defaults/Services/JobMonitoring/useESForJobParametersFlag
    from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI

    csAPI = CSAPI()

    res = csAPI.createSection("Operations/Defaults/Services/")
    if not res["OK"]:
        print(res["Message"])
        exit(1)

    res = csAPI.createSection("Operations/Defaults/Services/JobMonitoring/")
    if not res["OK"]:
        print(res["Message"])
        exit(1)
    csAPI.setOption("Operations/Defaults/Services/JobMonitoring/useESForJobParametersFlag", True)

    csAPI.commit()

    # Now we need to restart the services for the new configuration to be picked up
    time.sleep(2)
    os.system("dirac-restart-component WorkloadManagement JobMonitoring")
    os.system("dirac-restart-component WorkloadManagement JobStateUpdate")
    time.sleep(5)


def _checkWithRetries(fcn, args, expected):
    for _ in range(3):
        res = fcn(*args)
        assert res["OK"], res["Message"]
        if res["Value"] == expected:
            return
        time.sleep(1)
    assert res["Value"] == expected, "Failed to call %s after 3 retries" % fcn.__name__


def test_MySQLandES_jobParameters():
    """a basic put - remove test, changing the flag in between"""

    # First, create a job
    jobID = createJob()

    # Use the MySQL backend
    res = jobStateUpdateClient.setJobParameter(jobID, "ParName-fromMySQL", "ParValue-fromMySQL")
    assert res["OK"], res["Message"]
    _checkWithRetries(
        jobMonitoringClient.getJobParameter,
        (jobID, "ParName-fromMySQL"),
        {"ParName-fromMySQL": "ParValue-fromMySQL"},
    )

    res = jobMonitoringClient.getJobParameters(jobID)  # This will be looked up in MySQL only
    assert res["OK"], res["Message"]
    assert isinstance(res["Value"], dict), res["Value"]
    assert res["Value"] == {jobID: {"ParName-fromMySQL": "ParValue-fromMySQL"}}, res["Value"]

    res = jobMonitoringClient.getJobOwner(jobID)
    assert res["OK"], res["Message"]
    assert res["Value"] == "adminusername", res["Value"]

    res = jobStateUpdateClient.setJobsParameter({jobID: ["SomeStatus", "Waiting"]})
    assert res["OK"], res["Message"]
    _checkWithRetries(
        jobMonitoringClient.getJobParameters,
        (jobID,),
        {jobID: {"ParName-fromMySQL": "ParValue-fromMySQL", "SomeStatus": "Waiting"}},
    )

    res = jobMonitoringClient.getJobAttributes(jobID)
    assert res["OK"], res["Message"]

    # changing to use the ES flag
    updateFlag()

    # So now we are using the ES backend

    # This will still be in MySQL, but first it will look if it's in ES
    res = jobMonitoringClient.getJobParameter(jobID, "ParName-fromMySQL")
    assert res["OK"], res["Message"]
    assert res["Value"] == {"ParName-fromMySQL": "ParValue-fromMySQL"}, res["Value"]

    # Now we insert (in ES)
    res = jobStateUpdateClient.setJobParameter(jobID, "ParName-fromES", "ParValue-fromES")
"ParValue-fromES") time.sleep(2) # sleep to give time to ES to index assert res["OK"], res["Message"] res = jobMonitoringClient.getJobParameter(jobID, "ParName-fromES") # This will be in ES assert res["OK"], res["Message"] assert res["Value"] == {"ParName-fromES": "ParValue-fromES"}, res["Value"] res = jobMonitoringClient.getJobOwner(jobID) assert res["OK"], res["Message"] assert res["Value"] == "adminusername", res["Value"] # These parameters will be looked up in MySQL and in ES, and combined res = jobMonitoringClient.getJobParameters(jobID) assert res["OK"], res["Message"] assert res["Value"] == { jobID: {"ParName-fromMySQL": "ParValue-fromMySQL", "SomeStatus": "Waiting", "ParName-fromES": "ParValue-fromES"} }, res["Value"] # Do it again res = jobMonitoringClient.getJobParameters(jobID) assert res["OK"], res["Message"] assert res["Value"] == { jobID: {"ParName-fromMySQL": "ParValue-fromMySQL", "SomeStatus": "Waiting", "ParName-fromES": "ParValue-fromES"} }, res["Value"] # this is updating an existing parameter, but in practice it will be in ES only, # while in MySQL the old status "Waiting" will stay res = jobStateUpdateClient.setJobsParameter({jobID: ["SomeStatus", "Matched"]}) time.sleep(2) # sleep to give time to ES to index assert res["OK"], res["Message"] res = jobMonitoringClient.getJobParameters(jobID) assert res["OK"], res["Message"] assert res["Value"][jobID]["SomeStatus"] == "Matched", res["Value"] # again updating the same parameter res = jobStateUpdateClient.setJobsParameter({jobID: ["SomeStatus", "Running"]}) time.sleep(2) # sleep to give time to ES to index assert res["OK"], res["Message"] res = jobMonitoringClient.getJobParameters(jobID) assert res["OK"], res["Message"] assert res["Value"][jobID]["SomeStatus"] == "Running", res["Value"] # Now we create a second job secondJobID = createJob() res = jobMonitoringClient.getJobParameter(secondJobID, "ParName-fromMySQL") assert res["OK"], res["Message"] # Now we insert (in ES) res = jobStateUpdateClient.setJobParameter(secondJobID, "ParName-fromES-2", "ParValue-fromES-2") time.sleep(2) # sleep to give time to ES to index assert res["OK"], res["Message"] res = jobMonitoringClient.getJobParameter(secondJobID, "ParName-fromES-2") # This will be in ES assert res["OK"], res["Message"] assert res["Value"] == {"ParName-fromES-2": "ParValue-fromES-2"}, res["Value"] # These parameters will be looked up in MySQL and in ES, and combined res = jobMonitoringClient.getJobParameters([jobID, secondJobID]) assert res["OK"], res["Message"] assert res["Value"] == { jobID: { "ParName-fromMySQL": "ParValue-fromMySQL", "SomeStatus": "Running", "ParName-fromES": "ParValue-fromES", }, secondJobID: {"ParName-fromES-2": "ParValue-fromES-2"}, }, res["Value"] # These parameters will be looked up in MySQL and in ES, and combined res = jobMonitoringClient.getJobParameters([jobID, secondJobID], "SomeStatus") assert res["OK"], res["Message"] assert res["Value"][jobID] == {"SomeStatus": "Running"}, res["Value"] res = jobMonitoringClient.getJobAttributes(jobID) # these will still be all in MySQL assert res["OK"], res["Message"]
DIRACGrid/DIRAC
tests/Integration/WorkloadManagementSystem/Test_JobParameters_MySQLandES.py
Python
gpl-3.0
7,355
[ "DIRAC" ]
c8940da49bf094e98ac5978a2db1748339f5f16676591531c4c3e1c889ecc3c9
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* SpectraLearnPredict2 - SKlearn Neural Networks
* Perform Machine Learning on Spectroscopy Data.
*
* Uses: Deep Neural Networks, TensorFlow, SVM, PCA, K-Means
*
* By: Nicola Ferralis <feranick@hotmail.com>
*
***********************************************************
'''

import matplotlib
if matplotlib.get_backend() == 'TkAgg':
    matplotlib.use('Agg')

import numpy as np
import sys, os.path, getopt, glob, csv
import random, time, configparser, os
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
from sklearn import preprocessing

from .slp_config import *

#********************************************************************************
''' MultiLayer Perceptron - SKlearn '''
''' http://scikit-learn.org/stable/modules/neural_networks_supervised.html'''
#********************************************************************************

#********************************************************************************
''' Train Neural Network - sklearn '''
#********************************************************************************
def trainNN(A, Cl, A_test, Cl_test, Root):
    from sklearn.neural_network import MLPClassifier, MLPRegressor
    from sklearn.externals import joblib

    if nnDef.MLPRegressor is False:
        nnTrainedData = Root + '.nnModelC.pkl'
    else:
        nnTrainedData = Root + '.nnModelR.pkl'
    model_le = Root + '.nnLabelEnc.pkl'

    le = preprocessing.LabelEncoder()

    print('==========================================================================\n')
    print('\033[1m Running Neural Network: multi-layer perceptron (MLP)\033[0m')
    print('  Hidden layers with neuron count:', nnDef.hidden_layers)
    print('  Optimizer:', nnDef.optimizer, ', Activation Fn:', nnDef.activation_function,
          ', L2 reg. strength: ', nnDef.l2_reg_strength)

    try:
        if nnDef.alwaysRetrain == False:
            with open(nnTrainedData):
                print(' Opening NN training model...\n')
            clf = joblib.load(nnTrainedData)
            le = joblib.load(model_le)
        else:
            raise ValueError(' Force NN retraining.')
    except:
        #**********************************************
        ''' Retrain training data if not available'''
        #**********************************************
        if nnDef.MLPRegressor is False:
            print(' Retraining NN model using MLP Classifier...')
            clf = MLPClassifier(solver=nnDef.optimizer, alpha=nnDef.l2_reg_strength,
                                activation=nnDef.activation_function,
                                hidden_layer_sizes=nnDef.hidden_layers, random_state=1)
        else:
            print(' Retraining NN model using MLP Regressor...')
            clf = MLPRegressor(solver=nnDef.optimizer, alpha=nnDef.l2_reg_strength,
                               hidden_layer_sizes=nnDef.hidden_layers, random_state=1)
            Cl = np.array(Cl, dtype=float)

        totA = np.vstack((A, A_test))
        totCl = np.append(Cl, Cl_test)
        totCl2 = le.fit_transform(totCl)
        Cl2 = le.transform(Cl)
        Cl2_test = le.transform(Cl_test)

        clf.fit(A, Cl2)
        print("  Training on the full training dataset\n")
        accur = clf.score(A_test, Cl2_test)

        if nnDef.MLPRegressor is False:
            print('  Accuracy: ', 100 * accur, '%\n  Loss: {:.5f}'.format(clf.loss_), '\n')
        else:
            print('  Coefficient of determination R^2: ', accur,
                  '\n  Loss: {:.5f}'.format(clf.loss_), '\n')

        joblib.dump(clf, nnTrainedData)
        joblib.dump(le, model_le)

    return clf, le

#********************************************************************************
''' Evaluate Neural Network - sklearn '''
#********************************************************************************
def predNN(clf, A, Cl, R, le):
    if nnDef.MLPRegressor is False:
        prob = clf.predict_proba(R)[0].tolist()
        rosterPred = np.where(clf.predict_proba(R)[0] > nnDef.thresholdProbabilityPred / 100)[0]
        print('\n  ==============================')
        print('  \033[1mNN\033[0m - Probability >', str(nnDef.thresholdProbabilityPred), '%')
        print('  ==============================')
        print('  Prediction\tProbability [%]')
        for i in range(rosterPred.shape[0]):
            print(' ', str(np.unique(Cl)[rosterPred][i]), '\t\t',
                  str('{:.4f}'.format(100 * clf.predict_proba(R)[0][rosterPred][i])))
        print('  ==============================')

        R_pred = clf.predict(R)[0]
        if R_pred.size > 0:
            predValue = le.inverse_transform(R_pred)
        else:
            predValue = 0

        predProb = round(100 * max(prob), 4)
        print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - sklearn) = ' +
              str(predValue) + ' (probability = ' + str(predProb) + '%)\033[0m\n')
    else:
        Cl = np.array(Cl, dtype=float)
        predValue = clf.predict(R)[0]
        predProb = clf.score(A, Cl)
        print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - sklearn) = ' +
              str('{:.3f}'.format(predValue)) + ' (R^2 = ' + str('{:.5f}'.format(predProb)) + ')\033[0m\n')

    #**************************************
    ''' Neural Networks Classification Report '''
    #**************************************
    if nnDef.nnClassReport:
        print(' Neural Networks Classification Report\n')
        runClassReport(clf, A, Cl)

    #*************************
    ''' Plot probabilities '''
    #*************************
    if plotDef.showProbPlot:
        if nnDef.MLPRegressor is False:
            plotProb(clf, R)

    return predValue, predProb
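
#********************************************************************************
''' Illustrative sketch (an addition, not called by SpectraLearnPredict2):
 a minimal, self-contained MLPClassifier fit/score on synthetic data, mirroring
 the hyperparameter style wrapped by trainNN. All values here are assumptions. '''
#********************************************************************************
def _mlp_toy_example():
    from sklearn.neural_network import MLPClassifier
    rng = np.random.RandomState(1)
    X = rng.randn(200, 4)                    # 200 synthetic "spectra", 4 features
    y = (X[:, 0] + X[:, 1] > 0).astype(int)  # toy two-class labels
    clf = MLPClassifier(solver='lbfgs', alpha=1e-4,
                        hidden_layer_sizes=(10,), random_state=1)
    clf.fit(X, y)
    return clf.score(X, y)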
feranick/SpectralMachine
Archive/SpectraLearnPredict2/SpectraLearnPredict2/slp/slp_nn.py
Python
gpl-3.0
5,809
[ "NEURON" ]
a52e589496ed2be4ff99095d09d2238ac9b9b81c028657e6d0669f58c291ccca
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf

from tensorforce.core import Module, SignatureDict, TensorSpec, TensorsSpec, tf_function, tf_util


class Optimizer(Module):
    """
    Base class for optimizers.

    Args:
        name (string): (<span style="color:#0000C0"><b>internal use</b></span>).
        arguments_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
    """

    def __init__(self, *, name=None, arguments_spec=None):
        super().__init__(name=name)

        self.arguments_spec = arguments_spec

        self.is_initialized_given_variables = False

    def initialize_given_variables(self, *, variables):
        assert not self.root.is_initialized and not self.is_initialized_given_variables

        for module in self.this_submodules:
            if isinstance(module, Optimizer):
                module.initialize_given_variables(variables=variables)

        # Replace "/" with "_" to ensure TensorDict is flat
        self.variables_spec = TensorsSpec(((var.name[:-2].replace('/', '_'), TensorSpec(
            type=tf_util.dtype(x=var, fallback_tf_dtype=True), shape=tf_util.shape(x=var)
        )) for var in variables))

        self.is_initialized_given_variables = True

        if self.config.create_debug_assertions:
            self.is_initialized = False
            for variable in variables:
                self.zero_check_history = self.variable(
                    name='zero_check_history',
                    spec=TensorSpec(type='bool', shape=(3, len(variables))),
                    initializer='zeros', is_trainable=False, is_saved=False
                )
                self.zero_check_index = self.variable(
                    name='zero_check_index', spec=TensorSpec(type='int', shape=()),
                    initializer='zeros', is_trainable=False, is_saved=False
                )
            self.is_initialized = True

    def input_signature(self, *, function):
        if function == 'step' or function == 'update':
            return SignatureDict(arguments=self.arguments_spec.signature(batched=True))
        else:
            return super().input_signature(function=function)

    def output_signature(self, *, function):
        if function == 'step':
            return self.variables_spec.fmap(
                function=(lambda spec: spec.signature(batched=False)), cls=SignatureDict
            )
        elif function == 'update':
            return SignatureDict(
                singleton=TensorSpec(type='bool', shape=()).signature(batched=False)
            )
        else:
            return super().output_signature(function=function)

    @tf_function(num_args=1)
    def step(self, *, arguments, variables, **kwargs):
        raise NotImplementedError

    @tf_function(num_args=1)
    def update(self, *, arguments, variables, **kwargs):
        assert self.is_initialized_given_variables
        assert all(variable.dtype.is_floating for variable in variables)

        deltas = self.step(arguments=arguments, variables=variables, **kwargs)

        operations = list(deltas)

        if self.config.create_debug_assertions:
            from tensorforce.core.optimizers import DoublecheckStep, NaturalGradient, \
                Synchronization, UpdateModifier
            optimizer = self
            while isinstance(optimizer, UpdateModifier):
                if isinstance(optimizer, DoublecheckStep):
                    break
                optimizer = optimizer.optimizer
            if not isinstance(optimizer, DoublecheckStep) and (
                not isinstance(optimizer, NaturalGradient) or not optimizer.only_positive_updates
            ) and (not isinstance(self, Synchronization) or self.sync_frequency is None):
                false = tf_util.constant(value=False, dtype='bool')
                zero = tf_util.constant(value=0, dtype='int')
                one = tf_util.constant(value=1, dtype='int')
                zero_float = tf_util.constant(value=0.0, dtype='float')

                for index, (delta, variable) in enumerate(zip(deltas, variables)):
                    if '_distribution/mean/linear/' in variable.name:
                        # Gaussian.state_value does not use mean
                        continue
                    is_zero = tf.math.logical_and(
                        x=tf.math.equal(x=tf.math.count_nonzero(
                            input=delta, dtype=tf_util.get_dtype(type='int')
                        ), y=zero),
                        y=tf.reduce_any(input_tensor=tf.math.not_equal(
                            x=arguments['reward'], y=zero_float
                        ))
                    )
                    index = tf_util.constant(value=index, dtype='int', shape=(1,))
                    index = tf.stack(values=(
                        tf.expand_dims(input=self.zero_check_index, axis=0), index
                    ), axis=1)
                    operations.append(tf.tensor_scatter_nd_update(
                        tensor=self.zero_check_history, indices=index,
                        updates=tf.expand_dims(input=is_zero, axis=0)
                    ))

                operations.append(tf.debugging.assert_equal(
                    x=tf.math.reduce_any(input_tensor=tf.math.reduce_all(
                        input_tensor=self.zero_check_history, axis=1
                    ), axis=0), y=false
                ))

                with tf.control_dependencies(control_inputs=operations):
                    operations = [self.zero_check_index.assign(value=tf.math.mod(x=one, y=3))]

        with tf.control_dependencies(control_inputs=operations):
            dependencies = list()

            if self.root.summaries == 'all' or 'update-norm' in self.root.summaries:
                with self.root.summarizer.as_default():
                    x = tf.linalg.global_norm(
                        t_list=[tf_util.cast(x=delta, dtype='float') for delta in deltas]
                    )
                    dependencies.append(
                        tf.summary.scalar(name='update-norm', data=x, step=self.root.updates)
                    )

            if self.root.summaries == 'all' or 'updates' in self.root.summaries:
                with self.root.summarizer.as_default():
                    for var in variables:
                        assert var.name.startswith(self.root.name + '/') and var.name[-2:] == ':0'
                        mean_name = var.name[len(self.root.name) + 1: -2] + '-mean'
                        var_name = var.name[len(self.root.name) + 1: -2] + '-variance'
                        mean, variance = tf.nn.moments(x=var, axes=list(range(tf_util.rank(x=var))))
                        dependencies.append(
                            tf.summary.scalar(name=mean_name, data=mean, step=self.root.updates)
                        )
                        dependencies.append(
                            tf.summary.scalar(name=var_name, data=variance, step=self.root.updates)
                        )

        with tf.control_dependencies(control_inputs=dependencies):
            return tf_util.identity(input=tf_util.constant(value=True, dtype='bool'))
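

# Hedged reference sketch (an addition, not part of Tensorforce): the
# 'update-norm' summary recorded in update() is the global norm of all deltas;
# this standalone, uninvoked helper shows the equivalent plain-TensorFlow
# computation.
def _global_update_norm_example(deltas):
    return tf.linalg.global_norm([tf.cast(delta, tf.float32) for delta in deltas])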
reinforceio/tensorforce
tensorforce/core/optimizers/optimizer.py
Python
apache-2.0
7,845
[ "Gaussian" ]
536a5bfd9947549b8c131285db530befbc5703595ffd028e9cd7f81ac6d16465
#  Copyright (C) 2012,2013
#      Max Planck Institute for Polymer Research
#  Copyright (C) 2008,2009,2010,2011
#      Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

r"""
*************************
**espressopp.Exceptions**
*************************

.. function:: espressopp.Error(msg)

    :param msg:
    :type msg:

.. function:: espressopp.ParticleDoesNotExistHere(msg)

    :param msg:
    :type msg:

.. function:: espressopp.UnknownParticleProperty(msg)

    :param msg:
    :type msg:

.. function:: espressopp.MissingFixedPairList(msg)

    :param msg:
    :type msg:
"""

import sys, traceback


class Error(Exception):
    def __init__(self, msg):
        try:
            raise Exception
        except:
            file, lineno, module, line = traceback.extract_stack()[0]
            self.msg = 'ERROR while executing ' + str(file) + ' line ' + str(lineno) + \
                       ': ' + str(line) + '\n-> ' + msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)


class ParticleDoesNotExistHere(Exception):
    def __init__(self, msg):
        try:
            raise Exception
        except:
            self.msg = msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)


class UnknownParticleProperty(Exception):
    def __init__(self, msg):
        try:
            raise Exception
        except:
            self.msg = msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)


class MissingFixedPairList(Exception):
    def __init__(self, msg):
        try:
            raise Exception
        except:
            self.msg = msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)
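

# Minimal usage sketch (an illustrative addition, not part of ESPResSo++): how
# calling code typically raises and reports one of the exceptions defined above.
def _example_usage():
    try:
        raise ParticleDoesNotExistHere("particle id 42 is not stored on this node")
    except ParticleDoesNotExistHere as e:
        return str(e)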
capoe/espressopp.soap
src/Exceptions.py
Python
gpl-3.0
2,468
[ "ESPResSo" ]
f1468d1bd0206831cb9c1fad4cb948155c133ad329f116346e8648d97b54559d
""" utilities.py """ import os import tarfile from scipy.spatial import cKDTree import numpy as np from shutil import rmtree, copyfile from ConfigParser import SafeConfigParser from netCDF4 import Dataset from logging import getLogger from log import LOG_NAME from share import TIMESTAMPFORM, RPOINTER, EARTHRADIUS, METERSPERMILE from share import METERS2PERACRE, METERSPERKM, VALID_CHARS from config import read_config # -------------------------------------------------------------------- # # create logger log = getLogger(LOG_NAME) # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # # find x y coordinates def latlon2yx(plats, plons, glats, glons): """find y x coordinates """ # use astronomical conventions for longitude # (i.e. negative longitudes to the east of 0) if (glons.max() > 180): posinds = np.nonzero(glons > 180) glons[posinds] -= 360 log.info('adjusted grid lon to astronomical conventions') if (plons.max() > 180): posinds = np.nonzero(plons > 180) plons[posinds] -= 360 log.info('adjusted point lon to astronomical conventions') if glons.ndim == 1 or glats.ndim == 1: glons, glats = np.meshgrid(glons, glats) combined = np.dstack(([glats.ravel(), glons.ravel()]))[0] points = list(np.vstack((np.array(plats), np.array(plons))).transpose()) mytree = cKDTree(combined) dist, indexes = mytree.query(points, k=1) y, x = np.unravel_index(indexes, glons.shape) return y, x # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # # Search neighboring grid cells for channel def search_for_channel(source_area, routys, routxs, search=2, tol=10): """Search neighboring grid cells for channel""" log.debug('serching for channel') new_ys = np.empty_like(routys) new_xs = np.empty_like(routxs) for i, (y, x) in enumerate(zip(routys, routxs)): area0 = source_area[y, x] search_area = source_area[y-search:y+search+1, x-search:x+search+1] if np.any(search_area > area0*tol): sy, sx = np.unravel_index(search_area.argmax(), search_area.shape) new_ys[i] = y + sy - search new_xs[i] = x + sx - search log.debug('Moving pour point to channel y: ' '{0}->{1}, x: {2}->{3}'.format(y, new_ys[i], x, new_xs[i])) log.debug('Source Area has increased from {0}' ' to {1}'.format(area0, source_area[new_ys[i], new_xs[i]])) else: new_ys[i] = y new_xs[i] = x return new_ys, new_xs # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # # Write rpointer file def write_rpointer(restart_dir, restart_file, timestamp): """ Write a configuration file with restart file and time """ rpointer_file = os.path.join(restart_dir, RPOINTER) config = SafeConfigParser() config.optionxform = str time_str = timestamp.strftime(TIMESTAMPFORM) config.add_section('RESTART') config.set('RESTART', 'FILE_NAME', os.path.join(restart_dir, restart_file)) config.set('RESTART', 'TIMESTAMP', time_str) with open(rpointer_file, 'w') as configfile: config.write(configfile) return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # # A helper function to read a netcdf file def read_netcdf(nc_file, variables=None, coords=None): """ Read data from input netCDF. Will read all variables if none provided. Will also return all variable attributes. 
    f = Dataset(nc_file, 'r')

    if not variables:
        variables = f.variables.keys()
    if not coords:
        coords = slice(None)

    log.debug('Reading input data variables: %s, from file: %s',
              variables, nc_file)

    d = {}
    a = {}
    g = {}

    for var in variables:
        d[var] = f.variables[var][coords]
        a[var] = f.variables[var].__dict__

    for attr in f.ncattrs():
        g[attr] = getattr(f, attr)

    f.close()

    return d, a, g
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Check to make sure all the expected variables are present in the dictionary
def check_ncvars(config_section, nckeys):
    """
    Make sure the variables listed in the config file are present in the netcdf
    """
    for key, value in config_section.iteritems():
        if key.endswith('var'):
            if value not in nckeys:
                log.error('%s (%s) not in %s', value, key,
                          config_section['FILE_NAME'])
                raise NameError('Check netcdf that netcdf variable names match'
                                ' those in the configuration file')
    return
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Find the index of the nearest value
def find_nearest(array, value):
    """ Find the index location in (array) with value nearest to (value)"""
    return np.abs(array - value).argmin()
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Delete all the files in a directory
def clean_dir(directory):
    """ Clean all files in a directory"""
    for file_name in os.listdir(directory):
        file_path = os.path.join(directory, file_name)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
        except:
            log.exception('Error cleaning file: %s' % file_path)
    return
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Delete a particular file
def clean_file(file_name):
    """ Delete the file"""
    try:
        if os.path.isfile(file_name):
            os.unlink(file_name)
    except:
        log.exception('Error cleaning file: %s' % file_name)
    return
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Make a set of directories
def make_directories(rundir, subdir_names):
    """Make rvic directory structure"""
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    paths = {}
    for s in subdir_names:
        paths[s] = os.path.join(rundir, s)
        if not os.path.exists(paths[s]):
            os.makedirs(paths[s])
    return paths
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Move all the input files to a central location
def copy_inputs(config_file, InputsDir):

    config_dict = read_config(config_file)

    config = SafeConfigParser()
    config.optionxform = str
    config.read(config_file)

    new_config = os.path.join(InputsDir, os.path.split(config_file)[1])

    # ---------------------------------------------------------------- #
    # copy the inputs
    for key, section in config_dict.iteritems():
        if 'FILE_NAME' in section.keys():
            new_file_name = os.path.join(InputsDir,
                                         os.path.split(section['FILE_NAME'])[1])
            copyfile(section['FILE_NAME'], new_file_name)
            # update the config file for an easy restart
            config.set(key, 'FILE_NAME',
                       os.path.join(InputsDir,
                                    os.path.split(section['FILE_NAME'])[1]))
            # update the config_dict with the new value
            config_dict[key]['FILE_NAME'] = new_file_name
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # write the new configuration file
    with open(new_config, 'w') as configfile:
        config.write(configfile)
    # ---------------------------------------------------------------- #

    return config_dict
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def tar_inputs(inputs, suffix='', tar_type='tar'):
    """ Tar the inputs directory or file at the end of a run"""
    # ---------------------------------------------------------------- #
    # Make the TarFile
    if tar_type == 'tar':
        end = '.tar'
        mode = 'w:'
    elif tar_type in ['tgz', 'tar.gz', 'gunzip']:
        end = '.tgz'
        mode = 'w:gz'
    else:
        log.warning('Unknown tar_type: %s, proceeding with gunzipped mode',
                    tar_type)
        end = '.tgz'
        mode = 'w:gz'

    tar_file = inputs + suffix + end
    log.info('tarfile: %s', tar_file)

    if os.path.isdir(inputs):
        arcname = os.path.basename(os.path.normpath(inputs))
    else:
        arcname = os.path.split(inputs)[1]

    with tarfile.open(tar_file, mode) as tar:
        tar.add(inputs, arcname=arcname)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Check to make sure the TarFile exists before deleting the sources
    if os.path.exists(tar_file):
        # ------------------------------------------------------------ #
        # Remove the inputs
        if os.path.isdir(inputs):
            rmtree(inputs)
        elif os.path.isfile(inputs):
            os.unlink(inputs)
        # ------------------------------------------------------------ #
    else:
        log.error('Problem removing inputs: %s', inputs)
    # ---------------------------------------------------------------- #

    return tar_file
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
# Read the domain
def read_domain(domain_dict, lat0_is_min=False):
    """
    Read the domain file and return all the variables and attributes.
    Area is returned in m2
    """
    dom_data, dom_vatts, dom_gatts = read_netcdf(domain_dict['FILE_NAME'])

    check_ncvars(domain_dict, dom_data.keys())

    # ---------------------------------------------------------------- #
    # Create the cell_ids variable
    dom_mask = domain_dict['LAND_MASK_VAR']
    temp = np.arange(dom_data[dom_mask].size)
    dom_data['cell_ids'] = temp.reshape(dom_data[dom_mask].shape)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Make sure the longitude / latitude vars are 2d
    dom_lat = domain_dict['LATITUDE_VAR']
    dom_lon = domain_dict['LONGITUDE_VAR']

    dom_data['cord_lons'] = dom_data[dom_lon][:]
    dom_data['cord_lats'] = dom_data[dom_lat][:]

    if dom_data[dom_lon].ndim == 1:
        # ------------------------------------------------------------- #
        # Check latitude order, flip if necessary.
        if (dom_data[dom_lat][-1] > dom_data[dom_lat][0]) != lat0_is_min:
            log.debug('Domain Inputs came in upside down, flipping everything '
                      'now.')
            var_list = dom_data.keys()
            var_list.remove(dom_lon)
            for var in var_list:
                dom_data[var] = np.flipud(dom_data[var])
        # ------------------------------------------------------------ #

        # ------------------------------------------------------------- #
        # Make 2d coordinate vars
        dom_data[dom_lon], dom_data[dom_lat] = np.meshgrid(dom_data[dom_lon],
                                                           dom_data[dom_lat])
        # ------------------------------------------------------------- #
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Make sure the area is in m2
    dom_area = domain_dict['AREA_VAR']
    area_units = dom_vatts[dom_area]['units']

    if area_units in ["rad2", "radians2", "radian2", "radian^2", "rad^2",
                      "radians^2", "rads^2", "radians squared",
                      "square-radians"]:
        dom_data[dom_area] = dom_data[dom_area] * EARTHRADIUS * EARTHRADIUS
    elif area_units in ["m2", "m^2", "meters^2", "meters2", "square-meters",
                        "meters squared"]:
        dom_data[dom_area] = dom_data[dom_area]
    elif area_units in ["km2", "km^2", "kilometers^2", "kilometers2",
                        "square-kilometers", "kilometers squared"]:
        dom_data[dom_area] = dom_data[dom_area] * METERSPERKM * METERSPERKM
    elif area_units in ["mi2", "mi^2", "miles^2", "miles", "square-miles",
                        "miles squared"]:
        dom_data[dom_area] = dom_data[dom_area] * METERSPERMILE * METERSPERMILE
    elif area_units in ["acres", "ac", "ac."]:
        dom_data[dom_area] = dom_data[dom_area] * METERS2PERACRE
    else:
        log.warning("WARNING: UNKNOWN AREA units (%s), ASSUMING THEY ARE IN "
                    "SQUARE METERS", area_units)
    # ---------------------------------------------------------------- #
    return dom_data, dom_vatts, dom_gatts
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def strip_non_ascii(in_string):
    ''' Returns the string without non ASCII characters'''
    stripped = (c for c in in_string if 0 < ord(c) < 127)
    return ''.join(stripped)
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def strip_invalid_char(in_string):
    ''' Returns the string without invalid characters for filenames'''
    return ''.join(c for c in in_string if c in VALID_CHARS)
# -------------------------------------------------------------------- #
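

# -------------------------------------------------------------------- #
# Illustrative sketch (an addition, not called anywhere in RVIC): latlon2yx on
# a tiny synthetic 1d grid, documenting the expected call signature and that
# the returned y/x are nearest-cell indices. All values are assumptions.
def _latlon2yx_example():
    glats = np.arange(0.0, 5.0)
    glons = np.arange(10.0, 15.0)
    y, x = latlon2yx(plats=[2.2, 4.0], plons=[11.1, 13.9],
                     glats=glats, glons=glons)
    return y, x
# -------------------------------------------------------------------- #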
bartnijssen/RVIC
rvic/core/utilities.py
Python
gpl-3.0
14,411
[ "NetCDF" ]
ff1bc0582d11bd83be71adfca2b74cd480d1f08415ce018096007560115c93f1
import logging
import os

import numpy as np
from netCDF4 import Dataset

import validation_utils
from data_decoders.bin_reader import BINFileReader
from data_decoders.sig_reader import SIGFileReader
from data_decoders.sigrid_decoder import DecodeSIGRIDCodes

LOG = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG,
#                     format='[%(levelname)s: %(asctime)s: %(name)s] %(message)s',
#                     datefmt='%Y-%m-%d %H:%M:%S')


def handle_shapefile(shp_file, orig_file, orig_data, temp_files):
    """
    This function reprojects, rasterizes, and decodes NIC ice charts in
    shapefile format.

    :param shp_file:
    :param orig_file:
    :return:
    """
    # reproject shapefile:
    target_area_def = validation_utils.get_area_def(orig_file)
    proj_string = target_area_def.proj4_string
    reproj_filename = 'RE_{0}'.format(os.path.basename(shp_file))
    reproj_filename = os.path.join(os.path.dirname(shp_file), reproj_filename)
    cmd = 'ogr2ogr -f "ESRI Shapefile" -t_srs "{0}" {1} {2}'
    cmd = cmd.format(proj_string, reproj_filename, shp_file)
    try:
        LOG.info('Reprojecting shapefile to {0}'.format(shp_file))
        LOG.info('Executing: {0}'.format(cmd))
        os.system(cmd)
    except:
        raise Exception('ogr2ogr must be installed...')
    temp_files.append([reproj_filename,
                       reproj_filename.replace('.shp', '.shx'),
                       reproj_filename.replace('.shp', '.dbf'),
                       reproj_filename.replace('.shp', '.prj')])

    # rasterize/grid shapefile:
    layer = os.path.basename(reproj_filename).replace('.shp', '')
    area_extent = str(target_area_def.area_extent).strip('()')
    x_size = target_area_def.x_size
    y_size = target_area_def.y_size
    netcdf_file = reproj_filename.replace('.shp', '.nc')
    command = 'gdal_rasterize -l {0} -of NetCDF -init 200 -a_nodata 200 ' \
              '-where "CT IS NOT NULL" -te {1} -ts {2} {3} -ot Byte ' \
              '-a CT {4} {5}'.format(layer, area_extent, x_size, y_size,
                                     reproj_filename, netcdf_file)
    try:
        # call the actual conversion to NetCDF file
        LOG.info('Rasterizing shapefile to {0}'.format(netcdf_file))
        LOG.info('Executing: {0}'.format(cmd))
        os.system(command)
    except:
        raise Exception('gdal_rasterize must be installed...')
    temp_files.append(netcdf_file)

    # read NetCDF file
    dataset = Dataset(netcdf_file)
    # on my computer the image needs to be flipped upside down...
    # TODO: check if this is also necessary on other computers
    eval_data = np.flipud(dataset.variables['Band1'][:])  # .astype(np.uint8))
    # finally convert the sigrid ice codes to ice concentrations in %
    decoder = DecodeSIGRIDCodes()
    eval_data = decoder.sigrid_decoding(eval_data, orig_data)
    return eval_data


def handle_binfile(bin_file, orig_file, orig_data):
    bin_reader = BINFileReader()
    eval_file_data = bin_reader.read_data(bin_file, orig_file)
    decoder = DecodeSIGRIDCodes()
    eval_data = decoder.decode_values(eval_file_data, orig_data)
    return eval_data


def handle_sigfile(sig_file, orig_file, orig_data):
    sig_reader = SIGFileReader()
    eval_file_data = sig_reader.read_data(sig_file, orig_file)
    decoder = DecodeSIGRIDCodes()
    eval_data = decoder.sigrid_decoding(eval_file_data, orig_data)
    return eval_data


def handle_osi_ice_conc_nc_file(input_file):
    """
    This function reads the variable 'ice_conc' from an ice concentration
    product in NetCDF format. It filters out all values which are, based on
    the field 'status_flag', not a proper ice concentration value.

    :param input_file: str
        Path to an ice concentration product in NetCDF format.
    :return: np.array|np.ma.array
        The 'matrix' of ice concentration values. It is expected for this
        validation that the values are in the range of [0..100].
    """
    dataset = Dataset(input_file)
    ice_conc = dataset.variables['ice_conc'][0].data[:]
    status_flag = dataset.variables['status_flag'][0][:]

    mask_flags = np.logical_or.reduce((status_flag & 1 == 1,
                                       status_flag & 2 == 2,
                                       status_flag & 8 == 8))
    mask_conc = np.logical_or(ice_conc < 0, ice_conc > 100)
    ice_conc = np.ma.array(ice_conc, mask=(mask_flags | mask_conc))
    return ice_conc
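

# Illustrative usage sketch (an addition; the file name below is hypothetical):
# the fraction of grid cells that survive the status-flag and range filtering
# performed by handle_osi_ice_conc_nc_file.
def _example_valid_fraction(input_file='ice_conc_nh_20150101.nc'):
    ice_conc = handle_osi_ice_conc_nc_file(input_file)
    return 1.0 - float(np.ma.count_masked(ice_conc)) / ice_conc.size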
HelgeDMI/trollvalidation
trollvalidation/data_preparation.py
Python
apache-2.0
4,440
[ "NetCDF" ]
b99c461b51b36d4d5c6fbdba83d92f1766de0ab7fb6e0900cc6dc6c3f0dc1fe6
# stageDefaults contains the default options which are applied to each stage (command). # This section is required for every Rubra pipeline. # These can be overridden by options defined for individual stages, below. # Stage options which Rubra will recognise are: # - distributed: a boolean determining whether the task should be submitted to a cluster # job scheduling system (True) or run on the system local to Rubra (False). # - walltime: for a distributed PBS job, gives the walltime requested from the job # queue system; the maximum allowed runtime. For local jobs has no effect. # - memInGB: for a distributed PBS job, gives the memory in Gigabytes requested from the # job queue system. For local jobs has no effect. # - queue: for a distributed PBS job, this is the name of the queue to submit the # job to. For local jobs has no effect. This is currently a mandatory field for # distributed jobs, but can be set to None. # - modules: the modules to be loaded before running the task. This is intended for # systems with environment modules installed. Rubra will call module load on each # required module before running the task. Note that defining modules for individual # stages will override (not add to) any modules listed here. This currently only # works for distributed jobs. stageDefaults = { 'distributed': True, 'queue': None, 'walltime': "01:00:00", 'memInGB': 8, 'modules': [ "bwa-intel/0.7.5a", "samtools-intel/0.1.19", "picard/1.53", "python-gcc/2.7.5", "R-gcc/3.0.2", "gatk/1.6-7" ] } # stages should hold the details of each stage which can be called by runStageCheck. # This section is required for every Rubra pipeline. # Calling a stage in this way carries out checkpointing and, if desired, batch job # submission. # Each stage must contain a 'command' definition. See stageDefaults above for other # allowable options. 
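# For example, given the stageDefaults above, a stage that defines only a
# 'command' and 'walltime': "3:00:00" still runs distributed with 8 GB of
# memory on the default queue; only the walltime is overridden.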
stages = { "fastqc": { "command": "fastqc --quiet -o %outdir %seq", 'modules': [ "fastqc/0.10.1" ] }, 'bwaMemSE': { 'command': "bwa mem -t 8 %meta %ref %seq > %out", 'walltime': "3:00:00", 'queue': 'smp', 'memInGB': 23 }, 'bwaMemPE': { 'command': "bwa mem -t 8 %meta %ref %seq1 %seq2 > %out", 'walltime': "3:00:00", 'queue': 'smp', 'memInGB': 23 }, 'samToSortedBam': { 'command': "./SortSam 6 VALIDATION_STRINGENCY=LENIENT INPUT=%seq OUTPUT=%out SORT_ORDER=coordinate", 'walltime': "5:00:00", }, 'mergeBams': { 'command': "./PicardMerge 6 %baminputs USE_THREADING=true VALIDATION_STRINGENCY=LENIENT AS=true OUTPUT=%out", 'walltime': "5:00:00" }, 'indexBam': { 'command': "samtools index %bam" }, 'flagstat': { 'command': "samtools flagstat %bam > %out", 'walltime': "00:10:00" }, 'igvcount': { 'command': "igvtools count %bam %out hg19", 'modules': [ "igvtools/1.5.15" ] }, 'indexVCF': { 'command': "./vcftools_prepare.sh %vcf", 'modules': [ "tabix/0.2.5" ] }, 'realignIntervals': { # Hard-coded to take 2 known indels files right now 'command': "./GenomeAnalysisTK 1 -T RealignerTargetCreator -R %ref -I %bam --known %indels_goldstandard --known %indels_1000G -log %log -o %out", 'memInGB': 23, 'walltime': "5:00:00" }, 'realign': { 'command': "./GenomeAnalysisTK 22 -T IndelRealigner -R %ref -I %bam -targetIntervals %intervals -log %log -o %out", 'memInGB': 23, 'walltime': "5:00:00" }, 'dedup': { 'command': "./MarkDuplicates 6 INPUT=%bam REMOVE_DUPLICATES=true VALIDATION_STRINGENCY=LENIENT AS=true METRICS_FILE=%log OUTPUT=%out", 'walltime': '5:00:00' }, 'baseQualRecalCount': { 'command': "./GenomeAnalysisTK 12 -T CountCovariates -I %bam -R %ref --knownSites %dbsnp -nt 8 -l INFO -cov ReadGroupCovariate -cov QualityScoreCovariate -cov CycleCovariate -cov DinucCovariate -log %log -recalFile %out", 'queue': 'smp', 'memInGB': 23, 'walltime': "5:00:00" }, 'baseQualRecalTabulate': { 'command': "./GenomeAnalysisTK 4 -T TableRecalibration -I %bam -R %ref -recalFile %csvfile -l INFO -log %log -o %out", 'walltime': "5:00:00" }, 'callSNPs': { 'command': "./GenomeAnalysisTK 12 -T UnifiedGenotyper -nt 8 -R %ref -I %bam --dbsnp %dbsnp -stand_call_conf 50.0 -stand_emit_conf 10.0 -dcov 1600 -l INFO -A AlleleBalance -A DepthOfCoverage -A FisherStrand -glm SNP -log %log -o %out", 'queue': 'smp', 'memInGB': 23, 'walltime': "3:00:00" }, 'callIndels': { 'command': "./GenomeAnalysisTK 12 -T UnifiedGenotyper -nt 8 -R %ref -I %bam --dbsnp %dbsnp -stand_call_conf 50.0 -stand_emit_conf 10.0 -dcov 1600 -l INFO -A AlleleBalance -A DepthOfCoverage -A FisherStrand -glm INDEL -log %log -o %out", 'queue': 'smp', 'memInGB': 23, 'walltime': "3:00:00" }, 'filterSNPs': { # Very minimal hard filters based on GATK recommendations. VQSR is preferable if possible. 'command': "./GenomeAnalysisTK 4 -T VariantFiltration -R %ref --variant %vcf --filterExpression 'QD < 2.0 || MQ < 40.0 || FS > 60.0 || HaplotypeScore > 13.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0' --filterName 'GATK_MINIMAL_FILTER' -log %log -o %out", }, 'filterIndels': { # Very minimal hard filters based on GATK recommendations. VQSR is preferable if possible. 
# If you have 10 or more samples GATK also recommends the filter InbreedingCoeff < -0.8 'command': "./GenomeAnalysisTK 4 -T VariantFiltration -R %ref --variant %vcf --filterExpression 'QD < 2.0 || ReadPosRankSum < -20.0 || FS > 200.0' --filterName 'GATK_MINIMAL_FILTER' -log %log -o %out", }, 'annotateEnsembl': { # This command as written assumes that VEP and its cache have been # downloaded in respective locations # ./variant_effect_predictor_2.5 # ./variant_effect_predictor_2.5/vep_cache 'command': "perl variant_effect_predictor_2.5/variant_effect_predictor.pl --cache --dir variant_effect_predictor_2.5/vep_cache -i %vcf --vcf -o %out -species human --canonical --gene --protein --sift=b --polyphen=b > %log", 'modules': [ "perl/5.10.1", "ensembl/67" ] }, 'depthOfCoverage': { 'command': "./GenomeAnalysisTK 4 -T DepthOfCoverage -R %ref -I %bam -omitBaseOutput -ct 1 -ct 10 -ct 20 -ct 30 -o %out", }, 'collateReadcounts': { 'command': 'python count_flagstat_wgs.py %dir %outdir', 'walltime': "00:10:00" } }
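
# A minimal sanity-check sketch, assuming this config file can be executed
# directly: every stage must define a 'command' (see the comment block
# above); all other options fall back to stageDefaults.
if __name__ == '__main__':
    for name, options in stages.items():
        assert 'command' in options, "stage '%s' is missing a 'command'" % name
    print('All %d stages define a command.' % len(stages))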
vlsci/variant_calling_pipeline
pipeline_stages_config.py
Python
mit
6,757
[ "BWA" ]
6323a2967d888f93dee15493e6716c41719105567f02a2226d47954ab119293a
#!/usr/bin/env python """ A unittest script for the MicrobiomeAssayPrep module. """ import unittest import json from cutlass import MicrobiomeAssayPrep from CutlassTestConfig import CutlassTestConfig from CutlassTestUtil import CutlassTestUtil # pylint: disable=W0703, C1801 class MicrobiomeAssayPrepTest(unittest.TestCase): """ A unit test class for the MicrobiomeAssayPrep class. """ session = None util = None @classmethod def setUpClass(cls): """ Setup for the unittest. """ # Establish the session for each test method cls.session = CutlassTestConfig.get_session() cls.util = CutlassTestUtil() def testImport(self): """ Test the importation of the MicrobiomeAssayPrep module. """ success = False try: from cutlass import MicrobiomeAssayPrep success = True except Exception: pass self.failUnless(success) self.failIf(MicrobiomeAssayPrep is None) def testSessionCreate(self): """ Test the creation of a MicrobiomeAssayPrep via the session. """ success = False prep = None try: prep = self.session.create_microbiome_assay_prep() success = True except Exception: pass self.failUnless(success) self.failIf(prep is None) def testToJson(self): """ Test the generation of JSON from a MicrobiomeAssayPrep instance. """ prep = self.session.create_microbiome_assay_prep() success = False comment = "test comment" prep.comment = comment prep_json = None try: prep_json = prep.to_json() success = True except Exception: pass self.assertTrue(success, "Able to use 'to_json'.") self.assertTrue(prep_json is not None, "to_json() returned data.") parse_success = False try: prep_data = json.loads(prep_json) parse_success = True except Exception: pass self.assertTrue(parse_success, "to_json() did not throw an exception.") self.assertTrue(prep_data is not None, "to_json() returned parsable JSON.") self.assertTrue('meta' in prep_data, "JSON has 'meta' key in it.") self.assertEqual(prep_data['meta']['comment'], comment, "'comment' in JSON had expected value.") def testId(self): """ Test the id property. """ prep = self.session.create_microbiome_assay_prep() self.assertTrue(prep.id is None, "New template prep has no ID.") with self.assertRaises(AttributeError): prep.id = "test" def testVersion(self): """ Test the version property. """ prep = self.session.create_microbiome_assay_prep() self.assertTrue(prep.version is None, "New template prep has no version.") with self.assertRaises(ValueError): prep.version = "test" def testComment(self): """ Test the comment property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "comment") self.util.stringPropertyTest(self, prep, "comment") def testPrideId(self): """ Test the pride_id property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "pride_id") self.util.stringPropertyTest(self, prep, "pride_id") def testSampleName(self): """ Test the sample_name property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "sample_name") self.util.stringPropertyTest(self, prep, "sample_name") def testTitle(self): """ Test the title property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "title") self.util.stringPropertyTest(self, prep, "title") def testShortLabel(self): """ Test the short_label property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "short_label") self.util.stringPropertyTest(self, prep, "short_label") def testCenter(self): """ Test the center property. 
""" prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "center") self.util.stringPropertyTest(self, prep, "center") def testContact(self): """ Test the contact property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "contact") self.util.stringPropertyTest(self, prep, "contact") def testPrepID(self): """ Test the prep_id property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "prep_id") self.util.stringPropertyTest(self, prep, "prep_id") def testStorageDuration(self): """ Test the storage_duration property. """ prep = self.session.create_microbiome_assay_prep() self.util.intTypeTest(self, prep, "storage_duration") self.util.intPropertyTest(self, prep, "storage_duration") def testExperimentType(self): """ Test the experiment_type property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "experiment_type") self.util.stringPropertyTest(self, prep, "experiment_type") def testSpecies(self): """ Test the species property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "species") self.util.stringPropertyTest(self, prep, "species") def testCellType(self): """ Test the cell_type property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "cell_type") self.util.stringPropertyTest(self, prep, "cell_type") def testTissue(self): """ Test the tissue property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "tissue") self.util.stringPropertyTest(self, prep, "tissue") def testReference(self): """ Test the reference property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "reference") self.util.stringPropertyTest(self, prep, "reference") def testProtocolName(self): """ Test the protocol_name property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "protocol_name") self.util.stringPropertyTest(self, prep, "protocol_name") def testProtocolSteps(self): """ Test the protocol_steps property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "protocol_steps") self.util.stringPropertyTest(self, prep, "protocol_steps") def testExpDescription(self): """ Test the exp_description property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "exp_description") self.util.stringPropertyTest(self, prep, "exp_description") def testSampleDescription(self): """ Test the sample_description property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "sample_description") self.util.stringPropertyTest(self, prep, "sample_description") def testStudy(self): """ Test the study property. """ prep = self.session.create_microbiome_assay_prep() self.util.stringTypeTest(self, prep, "study") self.util.stringPropertyTest(self, prep, "study") def testTags(self): """ Test the tags property. 
""" prep = self.session.create_microbiome_assay_prep() tags = prep.tags self.assertTrue(type(tags) == list, # pylint: disable=C0123 "MicrobiomeAssayPrep tags() method returns a list.") self.assertEqual(len(tags), 0, "Template prep tags list is empty.") new_tags = ["tagA", "tagB"] prep.tags = new_tags self.assertEqual(prep.tags, new_tags, "Can set tags on a prep.") json_str = prep.to_json() doc = json.loads(json_str) self.assertTrue('tags' in doc['meta'], "JSON representation has 'tags' field in 'meta'.") self.assertEqual(doc['meta']['tags'], new_tags, "JSON representation had correct tags after setter.") def testAddTag(self): """ Test the add_tag() method. """ prep = self.session.create_microbiome_assay_prep() prep.add_tag("test") self.assertEqual(prep.tags, ["test"], "Can add a tag to a prep.") json_str = prep.to_json() doc = json.loads(json_str) self.assertEqual(doc['meta']['tags'], ["test"], "JSON representation had correct tags after add_tag().") # Try adding the same tag yet again, shouldn't get a duplicate with self.assertRaises(ValueError): prep.add_tag("test") json_str = prep.to_json() doc2 = json.loads(json_str) self.assertEqual(doc2['meta']['tags'], ["test"], "JSON document did not end up with duplicate tags.") def testRequiredFields(self): """ Test the required_fields() static method. """ required = MicrobiomeAssayPrep.required_fields() self.assertEqual(type(required), tuple, "required_fields() returns a tuple.") self.assertTrue(len(required) > 0, "required_fields() did not return empty value.") def testLoadSaveDelete(self): """ Extensive test for the load, edit, save and delete functions. """ # Attempt to save the object at all points before and after adding # the required fields prep = self.session.create_microbiome_assay_prep() test_links = {"prepared_from":[]} test_comment = "comment" test_contact = "A contact" test_center = "A center" test_experiment_type = "PRIDE:0000427, Top-down proteomics" test_protocol_steps = "test protocol steps" test_prep_id = "test prep_id" test_pride_id = "test pride_id" test_species = "platypus" test_storage_duration = 13 test_study = "ibd" test_tissue = "blood" test_title = "test title" test_sample_name = "test sample name" self.assertFalse(prep.save(), "Not saved successfully, no required fields") prep.comment = test_comment self.assertFalse(prep.save(), "Not saved successfully") prep.center = test_center self.assertFalse(prep.save(), "Not saved successfully") prep.links = test_links prep.study = test_study self.assertFalse(prep.save(), "Save successfully rejected") prep.contact = test_contact prep.prep_id = test_prep_id prep.pride_id = test_pride_id prep.protocol_steps = test_protocol_steps prep.species = test_species prep.tissue = test_tissue prep.storage_duration = test_storage_duration prep.title = test_title prep.experiment_type = test_experiment_type prep.sample_name = test_sample_name # Make sure visit does not delete if it does not exist with self.assertRaises(Exception): prep.delete() self.assertTrue(prep.save() is True, "Saved successfully") # Load the node that was just saved to OSDF prep_loaded = self.session.create_microbiome_assay_prep() prep_loaded = prep.load(prep.id) # Check all fields were saved and loaded successfully self.assertEqual(prep.comment, prep_loaded.comment, "Comment not saved & loaded successfully") self.assertEqual(prep.contact, prep_loaded.contact, "Contact not saved & loaded successfully") self.assertEqual(prep.center, prep_loaded.center, "Center not saved & loaded successfully") # Node is deleted successfully 
        self.assertTrue(prep.delete(), "Node was not deleted successfully")

        # The prep under the initial ID should not load successfully
        load_test = self.session.create_microbiome_assay_prep()

        with self.assertRaises(Exception):
            load_test = load_test.load(prep.id)

if __name__ == '__main__':
    unittest.main()
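
# For reference, a sketch of what a helper such as util.stringPropertyTest()
# presumably does (the real CutlassTestUtil implementation may differ):
#
#     def stringPropertyTest(self, testcase, obj, prop):
#         value = "random string"
#         setattr(obj, prop, value)
#         testcase.assertEqual(getattr(obj, prop), value,
#                              "'%s' round-trips a string value." % prop)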
ihmpdcc/cutlass
tests/test_microb_assay_prep.py
Python
mit
12,901
[ "VisIt" ]
c629e88fc421e9be62a05895930fb1094590f4ab7fd7fb5cc3d59134b83f809b
"""The experimentDB is a web-based application for the storage, organization and communication of experimental data with a focus on molecular biology and biochemical data. This application also stores data regarding reagents, including antibodies, constructs and other biomolecules as well as tracks the distribution of reagents. There is also some preliminary interfaces to other web resources. This project contains several sub-applications as described below: Projects -------- The intent of this app is to co-ordinate specific projects. Projects are intended to be large, grant-sized larger projects in the laboratory. Subprojects are intended to be smaller, potentially paper sized groups of experiments. An experiment can be part of one, none or several projects or subprojects. Data ---- This package defines experiments and the related data associated with them. The Experiment model is the focus of this entire project. It contains details about protocols, notes, reagents and project details. Results are associated with Experiment objects allowing for an Experiment to contain several results. Cloning ------- The cloning app defines the parameters for the synthesis and maintenance of constructs generated as part of an experiment. Constructs can be generated via either cloning or mutagenesis and will result in a Cloning or Mutagenesis object respectively. Proteins -------- The proteins referenced by this application may be targets of an experiment or reagent. This app also contains more detailed information about specific proteins, normally as accessed from public databases using either external databases or through Biopython tools. Reagents -------- The reagents app stores information about all tools used in research, most of which are defined by a particular Experiment object. These include Primer, Cell (cell lines), Antibody, Strain, Chemical and Construct objects. These models are abstract base classes of a superclass ReagentInfo which defines most of the common relevant information. External -------- The idea is to attribute particular models with references regarding external contacts or vendors or to link in specific references important to the experiments or projects. Datasets -------- The datasets app contains data and views for some external databases. This may include external databases accessed directly or with a mirrored internal database. This module is fairly research-interest specific and will likely be removed eventually. """
davebridges/ExperimentDB
experimentdb/__init__.py
Python
bsd-3-clause
2,496
[ "Biopython" ]
596eaa108c742c4858dc8afaad0606a6a55c536e56e117627ec3204616eb666b
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import cdms2
import cdtime
import cmor
import sys
import getopt
import factory
import numpy

from factory.formats import import_equation
from Toolbox.ESGFresources import *
from Toolbox.ESGFexcel import *
from Toolbox.CMORresources import CMORTable

# ************************************************************************
#                               process()
#
#   Convert to obs4MIPS file format.
# ************************************************************************
def process( rc ):
    '''
    Convert netcdf/matlab/grads files into CMIP5 format.
    '''
    # ----------------------------
    #  Loop yearly on file list.
    # ----------------------------
    file_template = rc['file_template'].split(",");
    if( len(file_template) == 2 ):
        template_parameter = file_template[1]
        rc['file_template'] = file_template[0]
    else:
        template_parameter = 'years'

    for year in rc[template_parameter].split(","):
        if(year == ''):
            files= os.popen( "ls " + rc['file_template'] ).readlines()
        else:
            # ------------------------------------------------
            # Use string formatting for path with same argument
            # ------------------------------------------------
            try:
                tmplFile = rc['file_template'] % (year)
            except:
                tmplFile = rc['file_template'].format(year)
            if( not os.path.isfile( tmplFile) ) :
                print "**** Warning %s not found\n" % ( tmplFile )
                continue
            files= os.popen( "ls " + tmplFile).readlines()

        if( files == [] ):
            print "No file found: Check your resource file"
            return -1
        # ------------------------------------------------
        # Get the right handler to manage this file format
        # ------------------------------------------------
        Handler = factory.HandlerFormats(files[0].strip())

        # -----------------------------------
        # Take care of cmor initialization.
        # -----------------------------------
        cmor.setup(inpath=rc['inpath'],
                   netcdf_file_action = cmor.CMOR_REPLACE)

        cmor.dataset(experiment_id = rc['experiment_id'],
                     institution   = rc['institution' ],
                     calendar      = rc['calendar' ],
                     institute_id  = rc['institute_id' ],
                     model_id      = rc['model_id' ],
                     source        = rc['source' ],
                     contact       = rc['contact' ],
                     references    = rc['references' ])

        # -----------------------------------------
        # add extra Global Attributes for obs4MIPs.
# ----------------------------------------- cmor.set_cur_dataset_attribute( 'instrument', rc['instrument' ]) cmor.set_cur_dataset_attribute( 'mip_specs', rc['mip_specs' ]) cmor.set_cur_dataset_attribute( 'data_structure', rc['data_structure']) cmor.set_cur_dataset_attribute( 'source_type', rc['source_type' ]) cmor.set_cur_dataset_attribute( 'source_id', rc['source_id' ]) cmor.set_cur_dataset_attribute( 'realm', rc['realm' ]) cmor.set_cur_dataset_attribute( 'obs_project', rc['obs_project' ]) cmor.set_cur_dataset_attribute( 'processing_version', rc['processing_version'] ) cmor.set_cur_dataset_attribute( 'processing_level', rc['processing_level'] ) cmor.load_table(rc['table']) # --------------------------------------------------------------------- # We loop on each file found, a new cmor file will be create on each # iteration. If you want to aggregate, you need to use Grads ctl file # or NeCDF list of file. # --------------------------------------------------------------------- for file in files: fnm=file.strip() # Get rid of \n aVariable = eval(rc['original_var']) nbVariable = len(aVariable) # ----------------------------------------------------- # ECMWF needs synoptic time 00z and 12z in he filename. # We extract it from the first file in the list. # ----------------------------------------------------- if( rc['source_fn'] == 'SYNOPTIC' ): index = fnm.find('z.') rc['SYNOPTIC'] = fnm[index-2:index] # ----------------------- # Work on all variables # ------------------------- for j in arange(nbVariable): # ---------------------------------------------------- # Fetch the variable directly or excute equation. # ---------------------------------------------------- try: variable=aVariable[j] Handler.open(fnm, variable=variable) rc['cvrt_original_var'] = aVariable[j] print "Working on variable %s " % variable except: if( aVariable[j] != 'equation' ) : print "Variable %s can't open" % variable continue else: print "Executing %s " % eval(rc['equation'])[j] # pdb.set_trace() rc['cvrt_original_units'] = eval(rc['original_units'])[j] rc['cvrt_cmor_var'] = eval(rc['cmor_var'])[j] rc['cvrt_equation'] = eval(rc['equation'])[j] rc['cvrt_level'] = eval(rc['level'])[j] data=Handler.getData() # ---------------------------------------------------------- # Evaluate equation if needed. 
Usually used to change units # ---------------------------------------------------------- if( rc['cvrt_equation'][0] == '@' ): fncName = rc['cvrt_equation'][1:] fnc = import_equation( "equations.%s" % fncName ) data[:]= fnc(Handler) else: data[:]=eval(rc['cvrt_equation']) # ------------------------------------------------------------- # Save filled value in case data type is changed in createAxes # ------------------------------------------------------------- fill_value = data.fill_value # --------------------------------------------- # Extract latitude/longitude # --------------------------------------------- lonvals=Handler.getLongitude() latvals=Handler.getLatitude() # --------------------- # Create cmor time axis # ---------------------- (rel_time, rel_time_bnds) = createTime(Handler, rc) # --------------------------------------------------- # Create cmor axes and add an axis to data if needed # --------------------------------------------------- (axes, data) = createAxes( rc, latvals, lonvals, data ) axis_ids = list() for axis in axes: axis_id = cmor.axis(**axis) axis_ids.append(axis_id) # ---------------------------------------------------------- # Create cmor variable # Note: since this is in the loop, a new cmor file will be # create for each cmor write command. # ---------------------------------------------------------- varid = cmor.variable(table_entry = rc['cvrt_cmor_var'], axis_ids = axis_ids, history = '', missing_value = fill_value, original_name = rc['cvrt_original_var'], units = rc['cvrt_original_units'] ) # ------------------------------- # Write data for this time frame. # ------------------------------- cmor.write(varid,data,\ time_vals=rel_time,time_bnds=rel_time_bnds) cmor.close(varid,file_name=True) # --------------------------------------- # Rename cmor files according to project. # --------------------------------------- if( movefiles(rc) ): return -2 cmor.close() return 0 # ******************************************************************** # # createTime() # # Define Time and Time bound axes for cmor # # ******************************************************************** def createTime(Handler, rc): ''' InputtimeUnits: specified from resource file or from first file in a list of file. return relative time and time bounds using OutputTimeUnits from resource file. ''' # ---------------------------------------------------- # Retrieve time units from file if not provided in the # resource file. # ---------------------------------------------------- InputTimeUnits = Handler.getTimeUnits(rc['InputTimeUnits']) # -------------------------------------------------------- # Create time relative to January 1st 1900 to facilitate # Threds software file handling. # ------------------------------------------------------- cur_time = Handler.getTime(InputTimeUnits) rel_time =[cur_time[i].torel(rc['OutputTimeUnits']).value for i in range(len(cur_time))] if( len(rel_time) == 1 ) : deltarel = 1 else: deltarel = rel_time[2] - rel_time[1] rel_time_bnds = rel_time[:] rel_time_bnds.append(rel_time[-1]+deltarel) return rel_time, rel_time_bnds # ******************************************************************** # # getCMIP5lev() # # Extract CMIP5 mandatory level and recreate a new data array. # They are 16 mandatory levels. 
# # ******************************************************************** def getCMIP5lev(data,rc): ''' ''' oTable = CMORTable(rc['inpath'], rc['table'], "plevs") # ---------------------- # Extract spefied levels # ---------------------- if( 'levels' in oTable.dico.keys() ): #pdb.set_trace() dataLevels = data.getLevel()[:] if( data.getLevel().units == "millibars" or data.getLevel().units == "hPa" or data.getLevel().units == "mbar" ): # -------------------------- # Change units for to Pascal # --------------------------- LevelScaleFactor = 100 dataLevels = data.getLevel()[:] * LevelScaleFactor # ---------------------------------------- # No level selected, return all data array # ---------------------------------------- if( len(rc['cvrt_level'].split(":")) == 1 ): levels = [ float(item) for item in dataLevels ] lev=cdms2.createAxis( levels ) lev.designateLevel() lev.units="pa" lev.long_name=data.getLevel().long_name #lev.id="lev" #lev=data.getAxis(1) #lev.__setattr__('_data_',dataLevels.astype(float)) #lev.__setattr__('units',"Pa") #lev.units="hPa" data2=data.pressureRegrid(lev) return data2 if( rc['cvrt_level'].split(':')[1] == "CMIP5" ): lev=cdms2.createAxis( [ float(item/LevelScaleFactor) for item in dataLevels if item in oTable.dico['levels' ] ] ) lev.designateLevel() lev.units="pa" lev.long_name = data.getLevel().long_name data2=data.pressureRegrid(lev) lev[:]=lev[:]*LevelScaleFactor return data2 else: # ----------------------- # Assume a list of values # ----------------------- levels = rc['cvrt_level'].split(':')[1].split(",") # -------------------------- # Change units to Pascal # --------------------------- dataLevels = [ float(rc['cvrt_level'].split(":")[1].split(",")[i]) * \ LevelScaleFactor for i in range(len(levels)) ] # ----------------------------------- # Match dataLevels with CMIP5 levels # Use file units # ----------------------------------- lev=cdms2.createAxis( [ float(item/LevelScaleFactor) for item in dataLevels if item in oTable.dico['levels' ] ] ) # ----------------------------------- # Set axis metadata # ----------------------------------- lev.units="pa" lev.long_name = data.getLevel().long_name lev.designateLevel() # ----------------------------------- # Extract specified levels # ----------------------------------- data2=data.pressureRegrid(lev) # ----------------------------------- # Scale data back # ----------------------------------- lev[:]=lev[:]*LevelScaleFactor return data2 return data # ******************************************************************** # # createAxes() # # Define axes required by cmor and add z axis to data if needed # # ******************************************************************** def createAxes(rc, latvals, lonvals, data): # --------------------------------------------- # Create time/lat/lon axes using a dictionary # --------------------------------------------- axes = [ {'table_entry' : 'time', 'units' : rc['OutputTimeUnits']}, {'table_entry' : 'latitude', 'units' : 'degrees_north', 'coord_vals' : latvals, 'cell_bounds' : latvals.getBounds()}, {'table_entry' : 'longitude', 'units' : 'degrees_east', 'coord_vals' : lonvals, 'cell_bounds' : lonvals.getBounds()}, ] fill_value = data.fill_value if( rc['cvrt_level'] == 'height2m' ): axes.append({'table_entry' : 'height2m', 'units' : 'm', 'coord_vals' : [2.0] }) data = numpy.array(data[:]) data = data[:,:,:,numpy.newaxis] elif( rc['cvrt_level'] != '' ): data = getCMIP5lev( data, rc ) levels=data.getLevel()[:] axes = numpy.insert(axes, 1, {'table_entry' : 'plevs', 'units' : 'Pa', 
                                   'coord_vals' : levels })

    return axes, data

# ********************************************************************
#
#      usage()
#
# ********************************************************************
def usage(message=''):
    '''
    Describe program synopsis.
    '''
    print
    print "*************************"
    print message
    print "*************************"
    print
    print
    print "obs4MIPS_process.py [-h] -r resource"
    print "   resource:  File containing Global attributes"
    print ""
    print "obs4MIPS will convert an input data file into CMIP5 format using "
    print "CMOR. A directory path will be created using CMOR by default or "
    print "using a template provided in the resource file."
    print

# ********************************************************************
#
#      main()
#
# ********************************************************************
def main():
    '''
    '''
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hy:r:x:",
                                   ["help" ,"year=","resource=","excel="])
    except getopt.GetoptError, err:
        usage(str(err))# will print something like "option -a not recognized"
        return(2)

    # --------------------------
    # Verify passed arguments
    # --------------------------
    year = -1
    resource = None
    excel = None
    for o, a in opts:
        if o in ("-r", "--resource"):
            resource = a
        elif o in ("-x", "--excel"):
            excel = a
        elif o in ("-h", "--help"):
            usage()
            return(0)
        elif o in ("-y", "--year"):
            year = a
        else:
            assert False, "unhandled option"

    # ------------------------------
    # Does the resource file exist?
    # ------------------------------
    if( ((resource == None ) or ( not os.path.isfile( resource ) )) and
        (( excel == None ) or ( not os.path.isfile( excel ) )) ):
        usage("bad Input Resource/Excel File")
        return 1

    # -----------------------
    # Read in "rc" file
    # -----------------------
    if( resource ):
        rc = ESGFresources( resource )
    if( excel ):
        rc = ESGFexcel( excel )

    # --------------------------------
    # Extract CMIP5 Table information
    # --------------------------------
    oTable = CMORTable(rc['inpath'], rc['table'])
    if( not 'original_var' in rc.resources.keys() ):
        sys.exit(-1)

    rc['project_id']     = oTable[ 'project_id' ]
    rc['product']        = oTable[ 'product' ]
    rc['modeling_realm'] = oTable[ 'modeling_realm' ]
    rc['frequency']      = oTable[ 'frequency' ]
    if( process(rc) ):
        return -1
    return 0

# ********************************************************************
#
# Call main program and return exit code
#
# ********************************************************************
if __name__ == '__main__':
    sys.exit(main())
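
# A small illustration (values assumed, standard Gregorian calendar) of the
# relative-time conversion performed in createTime(): a point 15 days after
# 2000-1-1, re-expressed relative to 1900-1-1 as used for the output axis.
#
#     import cdtime
#     t = cdtime.reltime(15, 'days since 2000-1-1')
#     t.torel('days since 1900-1-1').value   # -> 36539.0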
MJJoyce/climate
obs4MIPs/obs4MIPs_process.py
Python
apache-2.0
19,744
[ "NetCDF" ]
5217df7ceb5a742e5e0fbd9b1cf83ae3e005750e5502bf2b6191ce8f33df4c91
""" :mod: GFAL2_XROOTStorage ================= .. module: python :synopsis: XROOT module based on the GFAL2_StorageBase class. """ # from DIRAC from DIRAC import gLogger from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase class GFAL2_XROOTStorage( GFAL2_StorageBase ): """ .. class:: GFAL2_XROOTStorage Xroot interface to StorageElement using gfal2 """ def __init__( self, storageName, parameters ): """ c'tor :param self: self reference :param str storageName: SE name :param str protocol: protocol to use :param str rootdir: base path for vo files :param str host: SE host :param int port: port to use to communicate with :host: :param str spaceToken: space token :param str wspath: location of SRM on :host: """ self.log = gLogger.getSubLogger( "GFAL2_XROOTStorage", True ) # # init base class GFAL2_StorageBase.__init__( self, storageName, parameters ) # XROOT has problems with checksums at the moment. self.checksumType = None # self.log.setLevel( "DEBUG" ) self.pluginName = 'GFAL2_XROOT' self.protocol = self.protocolParameters['Protocol'] self.host = self.protocolParameters['Host'] # Aweful hack to cope for the moment with the inability of RSS to deal with something else than SRM # self.port = "" # self.wspath = "" # self.spaceToken = "" self.protocolParameters['Port'] = 0 self.protocolParameters['WSUrl'] = 0 self.protocolParameters['SpaceToken'] = 0
coberger/DIRAC
Resources/Storage/GFAL2_XROOTStorage.py
Python
gpl-3.0
1,523
[ "DIRAC" ]
c7e41084cbfc9dfa6a625b5b6fe6a05a9a1149edcccb0698d5a0b69c4690eb5e
# Copyright (c) 2011, Jimmy Cao All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials provided # with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS # AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER # OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import copy import fnmatch import itertools import math import os import random import re import signal import sqlite3 import string import subprocess import sys import threading import time import traceback import urllib.request from collections import defaultdict from datetime import datetime, timedelta from oyoyo.parse import parse_nick import botconfig import src.settings as var from src import decorators, events, logger # done this way so that events is accessible in !eval (useful for debugging) Event = events.Event debuglog = logger("debug.log", write=False, display=False) # will be True if in debug mode errlog = logger("errors.log") plog = logger(None) #use this instead of print so that logs have timestamps is_admin = var.is_admin is_owner = var.is_owner cmd = decorators.cmd hook = decorators.hook COMMANDS = decorators.COMMANDS # Game Logic Begins: var.LAST_STATS = None var.LAST_VOTES = None var.LAST_ADMINS = None var.LAST_GSTATS = None var.LAST_PSTATS = None var.LAST_TIME = None var.LAST_START = {} var.LAST_WAIT = {} var.USERS = {} var.ADMIN_PINGING = False var.ROLES = {"person" : []} var.SPECIAL_ROLES = {} var.ORIGINAL_ROLES = {} var.PLAYERS = {} var.DCED_PLAYERS = {} var.ADMIN_TO_PING = None var.AFTER_FLASTGAME = None var.PINGING_IFS = False var.TIMERS = {} var.ORIGINAL_SETTINGS = {} var.CURRENT_GAMEMODE = var.GAME_MODES["default"][0]() var.LAST_SAID_TIME = {} var.GAME_START_TIME = datetime.now() # for idle checker only var.CAN_START_TIME = 0 var.GRAVEYARD_LOCK = threading.RLock() var.WARNING_LOCK = threading.RLock() var.WAIT_TB_LOCK = threading.RLock() var.STARTED_DAY_PLAYERS = 0 var.DISCONNECTED = {} # players who got disconnected var.RESTARTING = False var.OPPED = False # Keeps track of whether the bot is opped var.BITTEN = {} var.BITTEN_ROLES = {} var.LYCAN_ROLES = {} var.VENGEFUL_GHOSTS = {} var.CHARMED = set() if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_TIMERS: var.NIGHT_TIME_LIMIT = 0 # 120 var.NIGHT_TIME_WARN = 0 # 90 var.DAY_TIME_LIMIT = 0 # 720 var.DAY_TIME_WARN = 0 # 600 var.SHORT_DAY_LIMIT = 0 # 520 var.SHORT_DAY_WARN = 0 # 400 if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_REAPER: var.KILL_IDLE_TIME = 0 # 300 var.WARN_IDLE_TIME = 0 # 180 var.PM_WARN_IDLE_TIME = 0 
# 240 var.JOIN_TIME_LIMIT = 0 # 3600 if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_STASIS: var.LEAVE_STASIS_PENALTY = 0 var.IDLE_STASIS_PENALTY = 0 var.PART_STASIS_PENALTY = 0 var.ACC_STASIS_PENALTY = 0 if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_TIME_LORD: var.TIME_LORD_DAY_LIMIT = 0 # 60 var.TIME_LORD_DAY_WARN = 0 # 45 var.TIME_LORD_NIGHT_LIMIT = 0 # 30 var.TIME_LORD_NIGHT_WARN = 0 # 20 plog("Loading Werewolf IRC bot") def connect_callback(cli): SIGUSR1 = getattr(signal, "SIGUSR1", None) SIGUSR2 = getattr(signal, "SIGUSR2", None) def sighandler(signum, frame): if signum == signal.SIGINT: # Exit immediately if Ctrl-C is pressed twice signal.signal(signal.SIGINT, signal.SIG_DFL) if signum in (signal.SIGINT, signal.SIGTERM): forced_exit.func(cli, "<console>", botconfig.CHANNEL, "") elif signum == SIGUSR1: restart_program.func(cli, "<console>", botconfig.CHANNEL, "") elif signum == SIGUSR2: plog("Scheduling aftergame restart") aftergame.func(cli, "<console>", botconfig.CHANNEL, "frestart") signal.signal(signal.SIGINT, sighandler) signal.signal(signal.SIGTERM, sighandler) if SIGUSR1: signal.signal(SIGUSR1, sighandler) if SIGUSR2: signal.signal(SIGUSR2, sighandler) to_be_devoiced = [] cmodes = [] @hook("quietlist", hookid=294) def on_quietlist(cli, server, botnick, channel, q, quieted, by, something): if re.search(r"^{0}.+\!\*@\*$".format(var.QUIET_PREFIX), quieted): # only unquiet people quieted by bot cmodes.append(("-{0}".format(var.QUIET_MODE), quieted)) @hook("banlist", hookid=294) def on_banlist(cli, server, botnick, channel, ban, by, timestamp): if re.search(r"^{0}.+\!\*@\*$".format(var.QUIET_PREFIX), ban): cmodes.append(("-{0}".format(var.QUIET_MODE), ban)) @hook("whoreply", hookid=295) def on_whoreply(cli, svr, botnick, chan, user, host, server, nick, status, rest): if not var.DISABLE_ACCOUNTS: plog("IRCd does not support accounts, disabling account-related features.") var.DISABLE_ACCOUNTS = True var.ACCOUNTS_ONLY = False if nick in var.USERS: return if nick == botconfig.NICK: cli.nickname = nick cli.ident = user cli.hostmask = host if "+" in status: to_be_devoiced.append(user) newstat = "" for stat in status: if not stat in var.MODES_PREFIXES: continue newstat += var.MODES_PREFIXES[stat] var.USERS[nick] = dict(cloak=host,account="*",inchan=True,modes=set(newstat),moded=set()) @hook("whospcrpl", hookid=295) def on_whoreply(cli, server, nick, ident, cloak, _, user, status, acc): if user in var.USERS: return # Don't add someone who is already there if user == botconfig.NICK: cli.nickname = user cli.ident = ident cli.hostmask = cloak if acc == "0": acc = "*" if "+" in status: to_be_devoiced.append(user) newstat = "" for stat in status: if not stat in var.MODES_PREFIXES: continue newstat += var.MODES_PREFIXES[stat] var.USERS[user] = dict(cloak=cloak,account=acc,inchan=True,modes=set(newstat),moded=set()) @hook("endofwho", hookid=295) def afterwho(*args): # Devoice all on connect for nick in to_be_devoiced: cmodes.append(("-v", nick)) try: # If the bot was restarted in the middle of the join phase, ping players that were joined. with sqlite3.connect("data.sqlite3", check_same_thread=False) as conn: c = conn.cursor() c.execute("SELECT players FROM pre_restart_state") players = c.fetchone()[0] if players: msg = "PING! " + var.break_long_message(players.split()).replace("\n", "\nPING! 
") cli.msg(botconfig.CHANNEL, msg) c.execute("UPDATE pre_restart_state SET players = NULL") except Exception: notify_error(cli, botconfig.CHANNEL, errlog) # Unhook the WHO hooks hook.unhook(295) #bot can be tricked into thinking it's still opped by doing multiple modes at once @hook("mode", hookid=296) def on_give_me_ops(cli, nick, chan, modeaction, target="", *other): if chan != botconfig.CHANNEL: return if modeaction == "+o" and target == botconfig.NICK: var.OPPED = True if botconfig.NICK in var.USERS: var.USERS[botconfig.NICK]["modes"].add("o") if var.PHASE == "none": @hook("quietlistend", hookid=297) def on_quietlist_end(cli, svr, nick, chan, *etc): if chan == botconfig.CHANNEL: mass_mode(cli, cmodes, ["-m"]) @hook("endofbanlist", hookid=297) def on_banlist_end(cli, svr, nick, chan, *etc): if chan == botconfig.CHANNEL: mass_mode(cli, cmodes, ["-m"]) cli.mode(botconfig.CHANNEL, var.QUIET_MODE) # unquiet all elif modeaction == "-o" and target == botconfig.NICK: var.OPPED = False cli.msg("ChanServ", "op " + botconfig.CHANNEL) if var.DISABLE_ACCOUNTS: cli.who(botconfig.CHANNEL) else: cli.who(botconfig.CHANNEL, "%uhsnfa") @hook("mode") def check_for_modes(cli, rnick, chan, modeaction, *target): nick = parse_nick(rnick)[0] if chan != botconfig.CHANNEL: return oldpref = "" trgt = "" keeptrg = False target = list(target) if target and target != [botconfig.NICK]: while modeaction: if len(modeaction) > 1: prefix = modeaction[0] change = modeaction[1] else: prefix = oldpref change = modeaction[0] if not keeptrg: if target: trgt = target.pop(0) else: trgt = "" # Last item, no target keeptrg = False if not prefix in ("-", "+"): change = prefix prefix = oldpref else: oldpref = prefix modeaction = modeaction[modeaction.index(change)+1:] if change in var.MODES_NOSET: keeptrg = True if prefix == "-" and change in var.MODES_ONLYSET: keeptrg = True if change not in var.MODES_PREFIXES.values(): continue if trgt in var.USERS: if prefix == "+": var.USERS[trgt]["modes"].add(change) if change in var.USERS[trgt]["moded"]: var.USERS[trgt]["moded"].remove(change) elif change in var.USERS[trgt]["modes"]: var.USERS[trgt]["modes"].remove(change) # Only sync modes if a server changed modes because # 1) human ops probably know better # 2) other bots might start a fight over modes # 3) recursion; we see our own mode changes. if "!" 
not in rnick: sync_modes(cli) #completes a partial nickname or string from a list def complete_match(string, matches): num_matches = 0 bestmatch = string for possible in matches: if string == possible: return string, 1 if possible.startswith(string) or possible.lstrip("[{\\^_`|}]").startswith(string): bestmatch = possible num_matches += 1 if num_matches != 1: return None, num_matches else: return bestmatch, 1 #wrapper around complete_match() used for roles def get_victim(cli, nick, victim, in_chan, self_in_list = False): if not victim: if in_chan: cli.notice(nick, "Not enough parameters.") else: pm(cli, nick, "Not enough parameters") return pl = [x for x in var.list_players() if x != nick or self_in_list] pll = [x.lower() for x in pl] tempvictim, num_matches = complete_match(victim.lower(), pll) if not tempvictim: #ensure messages about not being able to act on yourself work if num_matches == 0 and nick.lower().startswith(victim.lower()): return nick if in_chan: cli.notice(nick, "\u0002{0}\u0002 is not currently playing.".format(victim)) else: pm(cli, nick, "\u0002{0}\u0002 is not currently playing.".format(victim)) return return pl[pll.index(tempvictim)] #convert back to normal casing def mass_mode(cli, md_param, md_plain): """ Example: mass_mode(cli, [('+v', 'asdf'), ('-v','wobosd')], ['-m']) """ lmd = len(md_param) # store how many mode changes to do if md_param: for start_i in range(0, lmd, var.MODELIMIT): # 4 mode-changes at a time if start_i + var.MODELIMIT > lmd: # If this is a remainder (mode-changes < 4) z = list(zip(*md_param[start_i:])) # zip this remainder ei = lmd % var.MODELIMIT # len(z) else: z = list(zip(*md_param[start_i:start_i+var.MODELIMIT])) # zip four ei = var.MODELIMIT # len(z) # Now z equal something like [('+v', '-v'), ('asdf', 'wobosd')] arg1 = "".join(md_plain) + "".join(z[0]) arg2 = " ".join(z[1]) # + " " + " ".join([x+"!*@*" for x in z[1]]) cli.mode(botconfig.CHANNEL, arg1, arg2) else: cli.mode(botconfig.CHANNEL, "".join(md_plain)) def pm(cli, target, message): # message either privmsg or notice, depending on user settings if is_fake_nick(target) and botconfig.DEBUG_MODE: debuglog("Would message fake nick {0}: {1!r}".format(target, message)) return if is_user_notice(target): cli.notice(target, message) return cli.msg(target, message) decorators.pm = pm def reset_settings(): var.CURRENT_GAMEMODE.teardown() var.CURRENT_GAMEMODE = var.GAME_MODES["default"][0]() for attr in list(var.ORIGINAL_SETTINGS.keys()): setattr(var, attr, var.ORIGINAL_SETTINGS[attr]) dict.clear(var.ORIGINAL_SETTINGS) def reset_modes_timers(cli): # Reset game timers with var.WARNING_LOCK: # make sure it isn't being used by the ping join handler for x, timr in var.TIMERS.items(): timr[0].cancel() var.TIMERS = {} # Reset modes cmodes = [] for plr in var.list_players(): cmodes.append(("-v", plr)) if var.AUTO_TOGGLE_MODES: for plr in var.USERS: if not "moded" in var.USERS[plr]: continue for mode in var.USERS[plr]["moded"]: cmodes.append(("+"+mode, plr)) var.USERS[plr]["modes"].update(var.USERS[plr]["moded"]) var.USERS[plr]["moded"] = set() if var.QUIET_DEAD_PLAYERS: for deadguy in var.DEAD: if not is_fake_nick(deadguy): cmodes.append(("-{0}".format(var.QUIET_MODE), var.QUIET_PREFIX+deadguy+"!*@*")) mass_mode(cli, cmodes, ["-m"]) def reset(): var.PHASE = "none" # "join", "day", or "night" var.GAME_ID = 0 var.RESTART_TRIES = 0 var.DEAD = [] var.ROLES = {"person" : []} var.JOINED_THIS_GAME = [] # keeps track of who already joined this game at least once (cloaks) var.JOINED_THIS_GAME_ACCS = [] # same, 
except accounts var.PINGED_ALREADY = set() var.PINGED_ALREADY_ACCS = set() var.NO_LYNCH = [] var.FGAMED = False var.GAMEMODE_VOTES = {} #list of players who have used !game reset_settings() dict.clear(var.LAST_SAID_TIME) dict.clear(var.PLAYERS) dict.clear(var.DCED_PLAYERS) dict.clear(var.DISCONNECTED) reset() def make_stasis(nick, penalty): if nick in var.USERS: cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: return # Can't do it if not acc or acc == "*": acc = None if not cloak and not acc: return # Can't do it, either if acc: if penalty == 0: if acc in var.STASISED_ACCS: del var.STASISED_ACCS[acc] var.set_stasis_acc(acc, 0) else: var.STASISED_ACCS[acc] += penalty var.set_stasis_acc(acc, var.STASISED_ACCS[acc]) if (not var.ACCOUNTS_ONLY or not acc) and cloak: if penalty == 0: if cloak in var.STASISED: del var.STASISED[cloak] var.set_stasis(cloak, 0) else: var.STASISED[cloak] += penalty var.set_stasis(cloak, var.STASISED[cloak]) @cmd("fsync", admin_only=True, pm=True) def fsync(cli, nick, chan, rest): """Makes the bot apply the currently appropriate channel modes.""" sync_modes(cli) def sync_modes(cli): voices = [] pl = var.list_players() for nick, u in var.USERS.items(): if nick in pl and "v" not in u.get("modes", set()): voices.append(("+v", nick)) elif nick not in pl and "v" in u.get("modes", set()): voices.append(("-v", nick)) if var.PHASE in ("day", "night"): other = ["+m"] else: other = ["-m"] mass_mode(cli, voices, other) @cmd("fdie", "fbye", admin_only=True, pm=True) def forced_exit(cli, nick, chan, rest): """Forces the bot to close.""" if var.PHASE in ("day", "night"): try: stop_game(cli) except Exception: traceback.print_exc() try: reset_modes_timers(cli) except Exception: traceback.print_exc() try: reset() except Exception: traceback.print_exc() msg = "{0} quit from {1}" if rest.strip(): msg += " ({2})" try: cli.quit(msg.format("Scheduled" if forced_exit.aftergame else "Forced", nick, rest.strip())) except Exception: traceback.print_exc() sys.exit() def _restart_program(cli, mode=None): plog("RESTARTING") python = sys.executable if mode: assert mode in ("normal", "verbose", "debug") os.execl(python, python, sys.argv[0], "--{0}".format(mode)) else: os.execl(python, python, *sys.argv) @cmd("frestart", admin_only=True, pm=True) def restart_program(cli, nick, chan, rest): """Restarts the bot.""" if var.PHASE in ("day", "night"): try: stop_game(cli) except Exception: traceback.print_exc() try: reset_modes_timers(cli) except Exception: traceback.print_exc() try: with sqlite3.connect("data.sqlite3", check_same_thread=False) as conn: c = conn.cursor() players = var.list_players() if players: c.execute("UPDATE pre_restart_state SET players = ?", (" ".join(players),)) except Exception: traceback.print_exc() try: reset() except Exception: traceback.print_exc() msg = "{0} restart from {1}".format( "Scheduled" if restart_program.aftergame else "Forced", nick) rest = rest.strip() mode = None if rest: args = rest.split() first_arg = args[0].lower() if first_arg.endswith("mode") and first_arg != "mode": mode = first_arg.replace("mode", "") VALID_MODES = ("normal", "verbose", "debug") if mode not in VALID_MODES: err_msg = ("\u0002{0}\u0002 is not a valid mode. 
Valid " "modes are: {1}").format(mode, ", ".join(VALID_MODES)) if chan == nick: pm(cli, nick, err_msg) else: cli.notice(nick, err_msg) return msg += " in {0} mode".format(mode) rest = " ".join(args[1:]) if rest: msg += " ({0})".format(rest) try: cli.quit(msg.format(nick, rest.strip())) except Exception: traceback.print_exc() @hook("quit") def restart_buffer(cli, raw_nick, reason): nick, _, __, cloak = parse_nick(raw_nick) # restart the bot once our quit message goes though to ensure entire IRC queue is sent # if the bot is using a nick that isn't botconfig.NICK, then stop breaking things and fdie if nick == botconfig.NICK: _restart_program(cli, mode) # This is checked in the on_error handler. Some IRCds, such as InspIRCd, don't send the bot # its own QUIT message, so we need to use ERROR. Ideally, we shouldn't even need the above # handler now, but I'm keeping it for now just in case. var.RESTARTING = True @cmd("ping", pm=True) def pinger(cli, nick, chan, rest): """Check if you or the bot is still connected.""" message = random.choice(var.PING_MESSAGES).format(nick=nick) if chan == nick: pm(cli, nick, message) else: cli.msg(chan, message) @cmd("simple", raw_nick=True, pm=True) def mark_simple_notify(cli, nick, chan, rest): """Makes the bot give you simple role instructions, in case you are familiar with the roles.""" nick, _, __, cloak = parse_nick(nick) if nick in var.USERS: cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: acc = None if not acc or acc == "*": acc = None if acc: # Prioritize account if acc in var.SIMPLE_NOTIFY_ACCS: var.SIMPLE_NOTIFY_ACCS.remove(acc) var.remove_simple_rolemsg_acc(acc) if cloak in var.SIMPLE_NOTIFY: var.SIMPLE_NOTIFY.remove(cloak) var.remove_simple_rolemsg(cloak) cli.notice(nick, "You now no longer receive simple role instructions.") return var.SIMPLE_NOTIFY_ACCS.append(acc) var.add_simple_rolemsg_acc(acc) elif var.ACCOUNTS_ONLY: cli.notice(nick, "You are not logged in to NickServ.") return else: # Not logged in, fall back to hostmask if cloak in var.SIMPLE_NOTIFY: var.SIMPLE_NOTIFY.remove(cloak) var.remove_simple_rolemsg(cloak) cli.notice(nick, "You now no longer receive simple role instructions.") return var.SIMPLE_NOTIFY.append(cloak) var.add_simple_rolemsg(cloak) cli.notice(nick, "You now receive simple role instructions.") def is_user_simple(nick): if nick in var.USERS: cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: return False if acc and acc != "*" and not var.DISABLE_ACCOUNTS: if acc in var.SIMPLE_NOTIFY_ACCS: return True return False elif cloak in var.SIMPLE_NOTIFY and not var.ACCOUNTS_ONLY: return True return False @cmd("notice", raw_nick=True, pm=True) def mark_prefer_notice(cli, nick, chan, rest): """Makes the bot NOTICE you for every interaction.""" nick, _, __, cloak = parse_nick(nick) if nick in var.USERS: cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: acc = None if not acc or acc == "*": acc = None if acc and not var.DISABLE_ACCOUNTS: # Do things by account if logged in if acc in var.PREFER_NOTICE_ACCS: var.PREFER_NOTICE_ACCS.remove(acc) var.remove_prefer_notice_acc(acc) if cloak in var.PREFER_NOTICE: var.PREFER_NOTICE.remove(cloak) var.remove_prefer_notice(cloak) cli.notice(nick, "Gameplay interactions will now use PRIVMSG for you.") return var.PREFER_NOTICE_ACCS.append(acc) var.add_prefer_notice_acc(acc) elif var.ACCOUNTS_ONLY: cli.notice(nick, "You are not logged in to NickServ.") return else: # Not logged in if cloak in var.PREFER_NOTICE: 
var.PREFER_NOTICE.remove(cloak) var.remove_prefer_notice(cloak) cli.notice(nick, "Gameplay interactions will now use PRIVMSG for you.") return var.PREFER_NOTICE.append(cloak) var.add_prefer_notice(cloak) cli.notice(nick, "The bot will now always NOTICE you.") def is_user_notice(nick): if nick in var.USERS and var.USERS[nick]["account"] and var.USERS[nick]["account"] != "*" and not var.DISABLE_ACCOUNTS: if var.USERS[nick]["account"] in var.PREFER_NOTICE_ACCS: return True if nick in var.USERS and var.USERS[nick]["cloak"] in var.PREFER_NOTICE and not var.ACCOUNTS_ONLY: return True return False @cmd("swap", "replace", pm=True, phases=("join", "day", "night")) def replace(cli, nick, chan, rest): """Swap out a player logged in to your account.""" if nick not in var.USERS: pm(cli, nick, "You need to be in {0} to use that command.".format(botconfig.CHANNEL)) return if nick in var.list_players(): if chan == nick: pm(cli, nick, "You're already playing!") else: cli.notice(nick, "You're already playing!") return account = var.USERS[nick]["account"] if not account or account == "*": if chan == nick: pm(cli, nick, "You are not logged in to NickServ.") else: cli.notice(nick, "You are not logged in to NickServ.") return rest = rest.split() if not rest: # bare call target = None for user in var.USERS: if var.USERS[user]["account"] == account: if user == nick or (user not in var.list_players() and user not in var.VENGEFUL_GHOSTS): pass elif target is None: target = user else: if chan == nick: pm(cli, nick, "More than one player is logged in to your account. Use 'swap <nick>' to swap.") else: cli.notice(nick, "More than one player is logged in to your account. Use '{0}swap <nick>' to swap.".format(botconfig.CMD_CHAR)) return if target is None: msg = "You do not appear to be playing. Make sure you are identified to the same account." if chan == nick: pm(cli, nick, msg) else: cli.notice(nick, msg) return else: target, _ = complete_match(rest[0], var.list_players() + list(var.VENGEFUL_GHOSTS.keys())) if target not in var.list_players() and target not in var.VENGEFUL_GHOSTS: msg = "That person is no{0} playing.".format(" longer" if target in var.DEAD else "t") if chan == nick: pm(cli, nick, msg) else: cli.notice(nick, msg) return if target in var.USERS: if var.USERS[target]["account"] == "*": if chan == nick: pm(cli, nick, "That person is not logged in to NickServ.") else: cli.notice(nick, "That person is not logged in to NickServ.") return if var.USERS[target]["account"] == account and nick != target: rename_player(cli, target, nick) mass_mode(cli, [("-v", target), ("+v", nick)], []) cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 has swapped places with \u0002{1}\u0002.".format(nick, target)) myrole.caller(cli, nick, chan, "") @cmd("pingif", "pingme", "pingat", "pingpref", pm=True) def altpinger(cli, nick, chan, rest): """Pings you when the number of players reaches your preference. Usage: "pingif <players>". https://github.com/lykoss/lykos/wiki/Pingif""" players = is_user_altpinged(nick) rest = rest.split() if nick in var.USERS: cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: if chan == nick: pm(cli, nick, "You need to be in {0} to use that command.".format(botconfig.CHANNEL)) else: # former message: "You won the lottery! This is a bug though, so report it to the admins." 
cli.notice(nick, "You need to be in {0} to use that command.".format(botconfig.CHANNEL)) return if (not acc or acc == "*") and var.ACCOUNTS_ONLY: if chan == nick: pm(cli, nick, "You are not logged in to NickServ.") else: cli.notice(nick, "You are not logged in to NickServ.") return msg = [] if not rest: if players: msg.append("You will be pinged when there are at least {0} players joined.".format(players)) else: msg.append("You do not have any ping preferences currently set.") elif any((rest[0] in ("off", "never"), rest[0].isdigit() and int(rest[0]) == 0, len(rest) > 1 and rest[1].isdigit() and int(rest[1]) == 0)): if players: msg.append("Your ping preferences have been removed (was {0}).".format(players)) toggle_altpinged_status(nick, 0, players) else: msg.append("You do not have any preferences set.") elif rest[0].isdigit() or (len(rest) > 1 and rest[1].isdigit()): if rest[0].isdigit(): num = int(rest[0]) else: num = int(rest[1]) if num > 999: msg.append("That number is too large.") elif players == num: msg.append("Your ping preferences are already set to {0}.".format(num)) elif players: msg.append("Your ping preferences have been changed from {0} to {1}.".format(players, num)) toggle_altpinged_status(nick, num, players) else: msg.append("Your ping preferences have been set to {0}.".format(num)) toggle_altpinged_status(nick, num) else: msg.append("Invalid parameter. Please enter a non-negative integer or a valid preference.") if chan == nick: pm(cli, nick, "\n".join(msg)) else: cli.notice(nick, "\n".join(msg)) def is_user_altpinged(nick): if nick in var.USERS.keys(): cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: return 0 if not var.DISABLE_ACCOUNTS and acc and acc != "*": if acc in var.PING_IF_PREFS_ACCS.keys(): return var.PING_IF_PREFS_ACCS[acc] elif not var.ACCOUNTS_ONLY and cloak in var.PING_IF_PREFS.keys(): return var.PING_IF_PREFS[cloak] return 0 def toggle_altpinged_status(nick, value, old=None): # nick should be in var.USERS if not fake; if not, let the error propagate cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] if value == 0: if not var.DISABLE_ACCOUNTS and acc and acc != "*": if acc in var.PING_IF_PREFS_ACCS.keys(): del var.PING_IF_PREFS_ACCS[acc] var.set_pingif_status(acc, True, 0) if old is not None: with var.WARNING_LOCK: if old in var.PING_IF_NUMS_ACCS.keys(): if acc in var.PING_IF_NUMS_ACCS[old]: var.PING_IF_NUMS_ACCS[old].remove(acc) if not var.ACCOUNTS_ONLY and cloak in var.PING_IF_PREFS.keys(): del var.PING_IF_PREFS[cloak] var.set_pingif_status(cloak, False, 0) if old is not None: with var.WARNING_LOCK: if old in var.PING_IF_NUMS.keys(): if cloak in var.PING_IF_NUMS[old]: var.PING_IF_NUMS[old].remove(cloak) else: if not var.DISABLE_ACCOUNTS and acc and acc != "*": var.PING_IF_PREFS_ACCS[acc] = value var.set_pingif_status(acc, True, value) with var.WARNING_LOCK: if value not in var.PING_IF_NUMS_ACCS.keys(): var.PING_IF_NUMS_ACCS[value] = [] var.PING_IF_NUMS_ACCS[value].append(acc) if old is not None: if old in var.PING_IF_NUMS_ACCS.keys(): if acc in var.PING_IF_NUMS_ACCS[old]: var.PING_IF_NUMS_ACCS[old].remove(acc) elif not var.ACCOUNTS_ONLY: var.PING_IF_PREFS[cloak] = value var.set_pingif_status(cloak, False, value) with var.WARNING_LOCK: if value not in var.PING_IF_NUMS.keys(): var.PING_IF_NUMS[value] = [] var.PING_IF_NUMS[value].append(cloak) if old is not None: if old in var.PING_IF_NUMS.keys(): if cloak in var.PING_IF_NUMS[old]: var.PING_IF_NUMS[old].remove(cloak) def join_timer_handler(cli): with var.WARNING_LOCK: 
        var.PINGING_IFS = True
        to_ping = []
        pl = var.list_players()
        checker = []
        chk_acc = []

        # Add accounts/hosts to the list of possible players to ping
        if not var.DISABLE_ACCOUNTS:
            for num in var.PING_IF_NUMS_ACCS:
                if num <= len(pl):
                    chk_acc.extend(var.PING_IF_NUMS_ACCS[num])
        if not var.ACCOUNTS_ONLY:
            for num in var.PING_IF_NUMS:
                if num <= len(pl):
                    checker.extend(var.PING_IF_NUMS[num])

        # Don't ping alt connections of users that have already joined
        if not var.DISABLE_ACCOUNTS:
            for acc in (var.USERS[player]["account"] for player in pl if player in var.USERS):
                var.PINGED_ALREADY_ACCS.add(acc)

        # Remove players who have already been pinged from the list of possible players to ping
        for acc in chk_acc[:]:
            if acc in var.PINGED_ALREADY_ACCS:
                chk_acc.remove(acc)
        for cloak in checker[:]:
            if cloak in var.PINGED_ALREADY:
                checker.remove(cloak)

        # If there is nobody to ping, do nothing
        if not chk_acc and not checker:
            var.PINGING_IFS = False
            return

        @hook("whoreply", hookid=387)
        def ping_altpingers_noacc(cli, svr, botnick, chan, user, host, server, nick, status, rest):
            if ("G" in status or is_user_stasised(nick) or not var.PINGING_IFS
                    or nick == botnick or nick in pl):
                return
            if host in checker:
                to_ping.append(nick)
                var.PINGED_ALREADY.add(host)

        @hook("whospcrpl", hookid=387)
        def ping_altpingers(cli, server, nick, ident, cloak, _, user, status, acc):
            if ("G" in status or is_user_stasised(user) or not var.PINGING_IFS
                    or user == botconfig.NICK or user in pl):
                return
            # Create list of players to ping
            if acc and acc != "*":
                if acc in chk_acc:
                    to_ping.append(user)
                    var.PINGED_ALREADY_ACCS.add(acc)
            elif not var.ACCOUNTS_ONLY and cloak in checker:
                # mirror the whoreply handler: only ping hostmasks that have a matching preference
                to_ping.append(user)
                var.PINGED_ALREADY.add(cloak)

        @hook("endofwho", hookid=387)
        def fetch_altpingers(*stuff):
            # Note: a join landing while this handler is still in flight used to crash
            # the bot: the join removed join_pinger from var.TIMERS before this function
            # recreated it, so the lookup raised. The race needs precise timing to hit,
            # so keep it in mind when editing this code.
            var.PINGING_IFS = False
            hook.unhook(387)
            if to_ping:
                to_ping.sort(key=lambda x: x.lower())
                msg_prefix = "PING! {0} player{1}! 
".format(len(pl), "" if len(pl) == 1 else "s") msg = msg_prefix + var.break_long_message(to_ping).replace("\n", "\n" + msg_prefix) cli.msg(botconfig.CHANNEL, msg) if not var.DISABLE_ACCOUNTS: cli.who(botconfig.CHANNEL, "%uhsnfa") else: cli.who(botconfig.CHANNEL) @cmd("join", "j", phases=("none", "join")) def join(cli, nick, chan, rest): """Either starts a new game of Werewolf or joins an existing game that has not started yet.""" if var.ACCOUNTS_ONLY: if nick in var.USERS and (not var.USERS[nick]["account"] or var.USERS[nick]["account"] == "*"): cli.notice(nick, "You are not logged in to NickServ.") return if join_player(cli, nick, chan) and rest and not var.FGAMED: gamemode = rest.lower().split()[0] if gamemode not in var.GAME_MODES.keys(): match, _ = complete_match(gamemode, var.GAME_MODES.keys() - ["roles"]) if not match: return gamemode = match if gamemode != "roles": var.GAMEMODE_VOTES[nick] = gamemode cli.msg(chan, "\u0002{0}\u0002 votes for the \u0002{1}\u0002 game mode.".format(nick, gamemode)) def join_player(cli, player, chan, who = None, forced = False): if who is None: who = player pl = var.list_players() if chan != botconfig.CHANNEL: return if not var.OPPED: cli.notice(who, "Sorry, I'm not opped in {0}.".format(chan)) cli.msg("ChanServ", "op " + botconfig.CHANNEL) return if player in var.USERS: cloak = var.USERS[player]["cloak"] acc = var.USERS[player]["account"] elif is_fake_nick(player) and botconfig.DEBUG_MODE: # fakenick cloak = None acc = None else: return # Not normal if not acc or acc == "*" or var.DISABLE_ACCOUNTS: acc = None stasis = is_user_stasised(player) if stasis: if forced and stasis == 1: if cloak in var.STASISED: var.set_stasis(cloak, 0) del var.STASISED[cloak] if not var.DISABLE_ACCOUNTS and acc in var.STASISED_ACCS: var.set_stasis_acc(acc, 0) del var.STASISED_ACCS[acc] else: cli.notice(who, "Sorry, but {0} in stasis for {1} game{2}.".format( "you are" if player == who else player + " is", stasis, "s" if stasis != 1 else "")) return cmodes = [("+v", player)] if var.PHASE == "none": if var.AUTO_TOGGLE_MODES and player in var.USERS and var.USERS[player]["modes"]: for mode in var.USERS[player]["modes"]: cmodes.append(("-"+mode, player)) var.USERS[player]["moded"].update(var.USERS[player]["modes"]) var.USERS[player]["modes"] = set() mass_mode(cli, cmodes, []) var.ROLES["person"].append(player) var.PHASE = "join" with var.WAIT_TB_LOCK: var.WAIT_TB_TOKENS = var.WAIT_TB_INIT var.WAIT_TB_LAST = time.time() var.GAME_ID = time.time() var.PINGED_ALREADY_ACCS = set() var.PINGED_ALREADY = set() if cloak: var.JOINED_THIS_GAME.append(cloak) if acc: var.JOINED_THIS_GAME_ACCS.append(acc) var.CAN_START_TIME = datetime.now() + timedelta(seconds=var.MINIMUM_WAIT) cli.msg(chan, ('\u0002{0}\u0002 has started a game of Werewolf. '+ 'Type "{1}join" to join. Type "{1}start" to start the game. '+ 'Type "{1}wait" to increase the start wait time.').format(player, botconfig.CMD_CHAR)) # Set join timer if var.JOIN_TIME_LIMIT > 0: t = threading.Timer(var.JOIN_TIME_LIMIT, kill_join, [cli, chan]) var.TIMERS["join"] = (t, time.time(), var.JOIN_TIME_LIMIT) t.daemon = True t.start() elif player in pl: cli.notice(who, "{0}'re already playing!".format("You" if who == player else "They")) return True elif len(pl) >= var.MAX_PLAYERS: cli.notice(who, "Too many players! Try again next time.") return elif var.PHASE != "join": cli.notice(who, "Sorry, but the game is already running. 
Try again next time.") return else: if acc is not None and not botconfig.DEBUG_MODE: for user in pl: if var.USERS[user]["account"] == acc: msg = "Sorry, but \u0002{0}\u0002 is already joined under {1} account.{2}" if who == player: cli.notice(who, msg.format(user, "your", " Please use '{0}swap' to join instead.".format(botconfig.CMD_CHAR))) else: cli.notice(who, msg.format(user, "their", "")) return var.ROLES["person"].append(player) if not is_fake_nick(player) or not botconfig.DEBUG_MODE: if var.AUTO_TOGGLE_MODES and var.USERS[player]["modes"]: for mode in var.USERS[player]["modes"]: cmodes.append(("-"+mode, player)) var.USERS[player]["moded"].update(var.USERS[player]["modes"]) var.USERS[player]["modes"] = set() mass_mode(cli, cmodes, []) cli.msg(chan, "\u0002{0}\u0002 has joined the game and raised the number of players to \u0002{1}\u0002.".format(player, len(pl) + 1)) if not is_fake_nick(player) and not cloak in var.JOINED_THIS_GAME and (not acc or not acc in var.JOINED_THIS_GAME_ACCS): # make sure this only happens once var.JOINED_THIS_GAME.append(cloak) if acc: var.JOINED_THIS_GAME_ACCS.append(acc) now = datetime.now() # add var.EXTRA_WAIT_JOIN to wait time if now > var.CAN_START_TIME: var.CAN_START_TIME = now + timedelta(seconds=var.EXTRA_WAIT_JOIN) else: var.CAN_START_TIME += timedelta(seconds=var.EXTRA_WAIT_JOIN) # make sure there's at least var.WAIT_AFTER_JOIN seconds of wait time left, if not add them if now + timedelta(seconds=var.WAIT_AFTER_JOIN) > var.CAN_START_TIME: var.CAN_START_TIME = now + timedelta(seconds=var.WAIT_AFTER_JOIN) var.LAST_STATS = None # reset var.LAST_GSTATS = None var.LAST_PSTATS = None var.LAST_TIME = None with var.WARNING_LOCK: if "join_pinger" in var.TIMERS: var.TIMERS["join_pinger"][0].cancel() t = threading.Timer(10, join_timer_handler, (cli,)) var.TIMERS["join_pinger"] = (t, time.time(), 10) t.daemon = True t.start() return True def kill_join(cli, chan): pl = var.list_players() pl.sort(key=lambda x: x.lower()) msg = "PING! " + var.break_long_message(pl).replace("\n", "\nPING! ") reset_modes_timers(cli) reset() cli.msg(chan, msg) cli.msg(chan, "The current game took too long to start and " + "has been canceled. 
If you are still active, " + "please join again to start a new game.") if var.AFTER_FLASTGAME is not None: var.AFTER_FLASTGAME() var.AFTER_FLASTGAME = None @cmd("fjoin", admin_only=True, phases=("none", "join")) def fjoin(cli, nick, chan, rest): """Forces someone to join a game.""" noticed = False fake = False if not rest.strip(): join_player(cli, nick, chan, forced=True) for tojoin in re.split(" +",rest): tojoin = tojoin.strip() if "-" in tojoin: first, hyphen, last = tojoin.partition("-") if first.isdigit() and last.isdigit(): if int(last)+1 - int(first) > var.MAX_PLAYERS - len(var.list_players()): cli.msg(chan, "{0}: Too many players to join.".format(nick)) break fake = True for i in range(int(first), int(last)+1): join_player(cli, str(i), chan, forced=True, who=nick) continue if not tojoin: continue ul = list(var.USERS.keys()) ull = [u.lower() for u in ul] if tojoin.lower() not in ull or not var.USERS[ul[ull.index(tojoin.lower())]]["inchan"]: if not is_fake_nick(tojoin) or not botconfig.DEBUG_MODE: if not noticed: # important cli.msg(chan, nick+(": You may only fjoin "+ "people who are in this channel.")) noticed = True continue if not is_fake_nick(tojoin): tojoin = ul[ull.index(tojoin.lower())].strip() if not botconfig.DEBUG_MODE and var.ACCOUNTS_ONLY: if not var.USERS[tojoin]["account"] or var.USERS[tojoin]["account"] == "*": cli.notice(nick, "{0} is not logged in to NickServ.".format(tojoin)) return elif botconfig.DEBUG_MODE: fake = True if tojoin != botconfig.NICK: join_player(cli, tojoin, chan, forced=True, who=nick) else: cli.notice(nick, "No, that won't be allowed.") if fake: cli.msg(chan, "\u0002{0}\u0002 used fjoin and raised the number of players to \u0002{1}\u0002.".format(nick, len(var.list_players()))) @cmd("fleave", "fquit", admin_only=True, phases=("join", "day", "night")) def fleave(cli, nick, chan, rest): """Forces someone to leave the game.""" if chan != botconfig.CHANNEL: return for a in re.split(" +",rest): a = a.strip() if not a: continue pl = var.list_players() pll = [x.lower() for x in pl] if a.lower() in pll: a = pl[pll.index(a.lower())] else: cli.msg(chan, nick+": That person is not playing.") return message = "\u0002{0}\u0002 is forcing \u0002{1}\u0002 to leave.".format(nick, a) if var.get_role(a) != "person" and var.ROLE_REVEAL: message += " Say goodbye to the \u0002{0}\u0002.".format(var.get_reveal_role(a)) if var.PHASE == "join": lpl = len(var.list_players()) - 1 if lpl == 0: message += " No more players remaining." 
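# Sketch of the "first-last" range expansion fjoin accepts above, e.g. "5-8"
# fjoins fake players 5 through 8 provided enough seats remain. Standalone
# and illustrative; the names are not from the bot.
def expand_range(token, open_seats):
    first, _, last = token.partition("-")
    if not (first.isdigit() and last.isdigit()):
        return [token]                    # not a numeric range: single nick
    lo, hi = int(first), int(last)
    if hi + 1 - lo > open_seats:
        return None                       # too many players to join
    return [str(i) for i in range(lo, hi + 1)]

# expand_range("5-8", 10) -> ["5", "6", "7", "8"]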
else: message += " New player count: \u0002{0}\u0002".format(lpl) cli.msg(chan, message) del_player(cli, a, death_triggers=False) @cmd("fstart", admin_only=True, phases=("join",)) def fstart(cli, nick, chan, rest): """Forces the game to start immediately.""" cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 has forced the game to start.".format(nick)) start(cli, nick, botconfig.CHANNEL, forced = True) @hook("kick") def on_kicked(cli, nick, chan, victim, reason): if victim == botconfig.NICK: cli.join(chan) if chan == botconfig.CHANNEL: cli.msg("ChanServ", "op "+botconfig.CHANNEL) if var.AUTO_TOGGLE_MODES and victim in var.USERS: var.USERS[victim]["modes"] = set() var.USERS[victim]["moded"] = set() @hook("account") def on_account(cli, rnick, acc): nick, mode, user, cloak = parse_nick(rnick) chan = botconfig.CHANNEL if acc == "*" and var.ACCOUNTS_ONLY and nick in var.list_players(): cli.mode(chan, "-v", nick) leave(cli, "account", nick) cli.notice(nick, "Please reidentify to the account \u0002{0}\u0002".format(var.USERS[nick]["account"])) if nick in var.USERS.keys(): var.USERS[nick]["cloak"] = cloak var.USERS[nick]["account"] = acc if nick in var.DISCONNECTED.keys(): if acc == var.DISCONNECTED[nick][0]: if nick in var.USERS and var.USERS[nick]["inchan"]: with var.GRAVEYARD_LOCK: clk = var.DISCONNECTED[nick][1] act = var.DISCONNECTED[nick][0] if (acc == act and not var.DISABLE_ACCOUNTS) or (cloak == clk and not var.ACCOUNTS_ONLY): cli.mode(chan, "+v", nick, nick+"!*@*") del var.DISCONNECTED[nick] var.LAST_SAID_TIME[nick] = datetime.now() cli.msg(chan, "\u0002{0}\u0002 has returned to the village.".format(nick)) for r,rlist in var.ORIGINAL_ROLES.items(): if "(dced)"+nick in rlist: rlist.remove("(dced)"+nick) rlist.append(nick) break if nick in var.DCED_PLAYERS.keys(): var.PLAYERS[nick] = var.DCED_PLAYERS.pop(nick) @cmd("stats", "players", pm=True, phases=("join", "day", "night")) def stats(cli, nick, chan, rest): """Displays the player statistics.""" pl = var.list_players() if var.PHASE in ("night", "day"): pl = [x for x in var.ALL_PLAYERS if x in pl] if nick != chan and (nick in pl or var.PHASE == "join"): # only do this rate-limiting stuff if the person is in game if (var.LAST_STATS and var.LAST_STATS + timedelta(seconds=var.STATS_RATE_LIMIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. " + "Please wait a while before using it again.")) return var.LAST_STATS = datetime.now() _nick = nick + ": " if nick == chan: _nick = "" if chan == nick and nick in pl and var.get_role(nick) in var.WOLFCHAT_ROLES: ps = pl[:] random.shuffle(ps) for i, player in enumerate(ps): prole = var.get_role(player) if prole in var.WOLFCHAT_ROLES: cursed = "" if player in var.ROLES["cursed villager"]: cursed = "cursed " ps[i] = "\u0002{0}\u0002 ({1}{2})".format(player, cursed, prole) elif player in var.ROLES["cursed villager"]: ps[i] = player + " (cursed)" msg = "\u0002{0}\u0002 players: {1}".format(len(pl), ", ".join(ps)) elif len(pl) > 1: msg = "{0}\u0002{1}\u0002 players: {2}".format(_nick, len(pl), ", ".join(pl)) else: msg = "{0}\u00021\u0002 player: {1}".format(_nick, pl[0]) if nick == chan: pm(cli, nick, msg) else: if nick in pl or var.PHASE == "join": cli.msg(chan, msg) else: cli.notice(nick, msg) if var.PHASE == "join" or var.STATS_TYPE == "disabled": return message = [] # Instead of looping over the current roles, we start with the original set and apply # changes to it as public game events occur. 
This way, !stats output should duplicate # what a player would have if they were manually tracking who is what and did not # have any non-public information. The comments below explain the logic such a player # would be using to derive the list. Note that this logic is based on the assumption # that role reveal is on. If role reveal is off or team, stats type should probably be # set to disabled or team respectively instead of this, as this will then leak info. if var.STATS_TYPE == "default": # role: [min, max] -- "we may not necessarily know *exactly* how # many of a particular role there are, but we know that there is # between min and max of them" rolecounts = defaultdict(lambda: [0, 0]) start_roles = set() orig_roles = {} equiv_sets = {} total_immunizations = 0 extra_lycans = 0 # Step 1. Get our starting set of roles. This also calculates the maximum numbers for equivalency sets # (sets of roles that are decremented together because we can't know for sure which actually died). for r, v in var.ORIGINAL_ROLES.items(): if r in var.TEMPLATE_RESTRICTIONS.keys(): continue if len(v) == 0: continue start_roles.add(r) rolecounts[r] = [len(v), len(v)] for p in v: if p.startswith("(dced)"): p = p[6:] orig_roles[p] = r total_immunizations = rolecounts["doctor"][0] * math.ceil(len(var.ALL_PLAYERS) * var.DOCTOR_IMMUNIZATION_MULTIPLIER) if "amnesiac" in start_roles and "doctor" not in var.AMNESIAC_BLACKLIST: total_immunizations += rolecounts["amnesiac"][0] * math.ceil(len(var.ALL_PLAYERS) * var.DOCTOR_IMMUNIZATION_MULTIPLIER) extra_lycans = rolecounts["lycan"][0] - min(total_immunizations, rolecounts["lycan"][0]) equiv_sets["traitor_default"] = rolecounts["traitor"][0] + rolecounts[var.DEFAULT_ROLE][0] equiv_sets["lycan_villager"] = min(rolecounts["lycan"][0], total_immunizations) + rolecounts["villager"][0] equiv_sets["traitor_lycan_villager"] = equiv_sets["traitor_default"] + equiv_sets["lycan_villager"] - rolecounts[var.DEFAULT_ROLE][0] equiv_sets["amnesiac_clone"] = rolecounts["amnesiac"][0] + rolecounts["clone"][0] equiv_sets["amnesiac_clone_cub"] = rolecounts["amnesiac"][0] + rolecounts["clone"][0] + rolecounts["wolf cub"][0] equiv_sets["wolf_fallen"] = 0 equiv_sets["fallen_guardian"] = 0 if var.TRAITOR_TURNED: equiv_sets["traitor_default"] -= rolecounts["traitor"][0] equiv_sets["traitor_lycan_villager"] -= rolecounts["traitor"][0] rolecounts["wolf"][0] += rolecounts["traitor"][0] rolecounts["wolf"][1] += rolecounts["traitor"][1] rolecounts["traitor"] = [0, 0] # Step 2. Handle role swaps via exchange totem by modifying orig_roles -- the original # roles themselves didn't change, just who has them. By doing the swap early on we greatly # simplify the death logic below in step 3 -- to an outsider that doesn't know any info # the role swap might as well never happened and those people simply started with those roles; # they can't really tell the difference. for a, b in var.EXCHANGED_ROLES: orig_roles[a], orig_roles[b] = orig_roles[b], orig_roles[a] # Step 3. 
Work out people that turned into wolves via either alpha wolf, lycan, or lycanthropy totem # All three of those play the same "chilling howl" message, once per additional wolf num_alpha = rolecounts["alpha wolf"][0] num_angel = rolecounts["guardian angel"][0] if "amnesiac" in start_roles and "guardian angel" not in var.AMNESIAC_BLACKLIST: num_angel += rolecounts["amnesiac"][0] have_lycan_totem = False for idx, shaman in enumerate(var.TOTEM_ORDER): if (shaman in start_roles or ("amnesiac" in start_roles and shaman not in var.AMNESIAC_BLACKLIST)) and var.TOTEM_CHANCES["lycanthropy"][idx] > 0: have_lycan_totem = True extra_wolves = var.EXTRA_WOLVES num_wolves = rolecounts["wolf"][0] num_fallen = rolecounts["fallen angel"][0] while extra_wolves > 0: extra_wolves -= 1 if num_alpha == 0 and not have_lycan_totem: # This is easy, all of our extra wolves are actual lycans, and we know this for a fact rolecounts["wolf"][0] += 1 rolecounts["wolf"][1] += 1 num_wolves += 1 if rolecounts["lycan"][1] > 0: rolecounts["lycan"][0] -= 1 rolecounts["lycan"][1] -= 1 else: # amnesiac or clone became lycan and was subsequently turned maxcount = max(0, equiv_sets["amnesiac_clone"] - 1) rolecounts["amnesiac"][0] = max(0, rolecounts["amnesiac"][0] - 1) if rolecounts["amnesiac"][1] > maxcount: rolecounts["amnesiac"][1] = maxcount rolecounts["clone"][0] = max(0, rolecounts["clone"][0] - 1) if rolecounts["clone"][1] > maxcount: rolecounts["clone"][1] = maxcount equiv_sets["amnesiac_clone"] = maxcount if extra_lycans > 0: extra_lycans -= 1 else: equiv_sets["lycan_villager"] = max(0, equiv_sets["lycan_villager"] - 1) equiv_sets["traitor_lycan_villager"] = max(0, equiv_sets["traitor_lycan_villager"] - 1) elif num_alpha == 0 or num_angel == 0: # We are guaranteed to have gotten an additional wolf, but we can't guarantee it was an actual lycan rolecounts["wolf"][0] += 1 rolecounts["wolf"][1] += 1 num_wolves += 1 rolecounts["lycan"][0] = max(0, rolecounts["lycan"][0] - 1) # apply alphas before lycan totems (in case we don't actually have lycan totems) # this way if we don't have totems and alphas is 0 we hit guaranteed lycans above if num_alpha > 0: num_alpha -= 1 else: # We may have gotten an additional wolf or an additional fallen angel, we don't necessarily know which num_alpha -= 1 num_angel -= 1 rolecounts["lycan"][0] = max(0, rolecounts["lycan"][0] - 1) rolecounts["wolf"][1] += 1 rolecounts["fallen angel"][1] += 1 rolecounts["guardian angel"][0] -= 1 equiv_sets["wolf_fallen"] += 1 equiv_sets["fallen_guardian"] += 1 # Step 4. Remove all dead players # When rolesets are a thing (e.g. 
one of x, y, or z), those will be resolved here as well for p in var.ALL_PLAYERS: if p in pl: continue # pr should be the role the person gets revealed as should they die pr = orig_roles[p] if p in var.FINAL_ROLES and pr not in ("amnesiac", "clone"): pr = var.FINAL_ROLES[p] elif pr == "amnesiac" and not var.HIDDEN_AMNESIAC and p in var.FINAL_ROLES: pr = var.FINAL_ROLES[p] elif pr == "clone" and not var.HIDDEN_CLONE and p in var.FINAL_ROLES: pr = var.FINAL_ROLES[p] elif pr == "traitor" and var.TRAITOR_TURNED: # we turned every traitor into wolf above, which means even though # this person died as traitor, we need to deduct the count from wolves pr = "wolf" elif pr == "traitor" and var.HIDDEN_TRAITOR: pr = var.DEFAULT_ROLE # set to true if we kill more people than exist in a given role, # which means that amnesiac or clone must have became that role overkill = False if pr == var.DEFAULT_ROLE: # the person that died could have been traitor or an immunized lycan if var.DEFAULT_ROLE == "villager": maxcount = equiv_sets["traitor_lycan_villager"] else: maxcount = equiv_sets["traitor_default"] if maxcount == 0: overkill = True maxcount = max(0, maxcount - 1) if var.HIDDEN_TRAITOR and not var.TRAITOR_TURNED: rolecounts["traitor"][0] = max(0, rolecounts["traitor"][0] - 1) if rolecounts["traitor"][1] > maxcount: rolecounts["traitor"][1] = maxcount if var.DEFAULT_ROLE == "villager" and total_immunizations > 0: total_immunizations -= 1 rolecounts["lycan"][0] = max(0, rolecounts["lycan"][0] - 1) if rolecounts["lycan"][1] > maxcount + extra_lycans: rolecounts["lycan"][1] = maxcount + extra_lycans rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) if rolecounts[pr][1] > maxcount: rolecounts[pr][1] = maxcount if var.DEFAULT_ROLE == "villager": equiv_sets["traitor_lycan_villager"] = maxcount else: equiv_sets["traitor_default"] = maxcount elif pr == "villager": # the villager that died could have been an immunized lycan maxcount = max(0, equiv_sets["lycan_villager"] - 1) if equiv_sets["lycan_villager"] == 0: overkill = True if total_immunizations > 0: total_immunizations -= 1 rolecounts["lycan"][0] = max(0, rolecounts["lycan"][0] - 1) if rolecounts["lycan"][1] > maxcount + extra_lycans: rolecounts["lycan"][1] = maxcount + extra_lycans rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) if rolecounts[pr][1] > maxcount: rolecounts[pr][1] = maxcount equiv_sets["lycan_villager"] = maxcount elif pr == "lycan": # non-immunized lycan, reduce counts appropriately if rolecounts[pr][1] == 0: overkill = True rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) rolecounts[pr][1] = max(0, rolecounts[pr][1] - 1) if extra_lycans > 0: extra_lycans -= 1 else: equiv_sets["lycan_villager"] = max(0, equiv_sets["lycan_villager"] - 1) equiv_sets["traitor_lycan_villager"] = max(0, equiv_sets["traitor_lycan_villager"] - 1) elif pr == "wolf": # person that died could have possibly been turned by alpha if rolecounts[pr][1] == 0: # this overkill either means that we're hitting amnesiac/clone or that cubs turned overkill = True rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) rolecounts[pr][1] = max(0, rolecounts[pr][1] - 1) if num_wolves > 0: num_wolves -= 1 elif equiv_sets["wolf_fallen"] > 0: equiv_sets["wolf_fallen"] -= 1 equiv_sets["fallen_guardian"] = max(0, equiv_sets["fallen_guardian"] - 1) rolecounts["fallen angel"][1] = max(0, rolecounts["fallen angel"][1] - 1) rolecounts["guardian angel"][0] = max(rolecounts["guardian angel"][0] + 1, rolecounts["guardian angel"][1]) rolecounts["fallen angel"][0] = min(rolecounts["fallen 
angel"][0], rolecounts["fallen angel"][1]) elif pr == "fallen angel": # person that died could have possibly been turned by alpha if rolecounts[pr][1] == 0: overkill = True rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) rolecounts[pr][1] = max(0, rolecounts[pr][1] - 1) if num_fallen > 0: num_fallen -= 1 elif equiv_sets["wolf_fallen"] > 0: equiv_sets["wolf_fallen"] -= 1 equiv_sets["fallen_guardian"] = max(0, equiv_sets["fallen_guardian"] - 1) rolecounts["wolf"][1] = max(0, rolecounts["wolf"][1] - 1) rolecounts["wolf"][0] = min(rolecounts["wolf"][0], rolecounts["wolf"][1]) # this also means a GA died for sure (we lowered the lower bound previously) rolecounts["guardian angel"][1] = max(0, rolecounts["guardian angel"][1] - 1) elif pr == "guardian angel": if rolecounts[pr][1] == 0: overkill = True if rolecounts[pr][1] <= equiv_sets["fallen_guardian"] and equiv_sets["fallen_guardian"] > 0: # we got rid of a GA that was an FA candidate, so get rid of the FA as well # (this also means that there is a guaranteed wolf so add that in) equiv_sets["fallen_guardian"] = max(0, equiv_sets["fallen_guardian"] - 1) equiv_sets["wolf_fallen"] = max(0, equiv_sets["wolf_fallen"] - 1) rolecounts["fallen angel"][1] = max(rolecounts["fallen angel"][0], rolecounts["fallen angel"][1] - 1) rolecounts["wolf"][0] = min(rolecounts["wolf"][0] + 1, rolecounts["wolf"][1]) rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) rolecounts[pr][1] = max(0, rolecounts[pr][1] - 1) elif pr == "wolf cub": if rolecounts[pr][1] == 0: overkill = True rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) rolecounts[pr][1] = max(0, rolecounts[pr][1] - 1) equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) else: # person that died is guaranteed to be that role (e.g. not in an equiv_set) if rolecounts[pr][1] == 0: overkill = True rolecounts[pr][0] = max(0, rolecounts[pr][0] - 1) rolecounts[pr][1] = max(0, rolecounts[pr][1] - 1) if overkill: # we tried killing more people than exist in a role, so deduct from amnesiac/clone count instead if pr == "clone": # in this case, it means amnesiac became a clone (clone becoming amnesiac is impossible so we # do not have the converse check in here - clones always inherit what amnesiac turns into). 
equiv_sets["amnesiac_clone"] = max(0, equiv_sets["amnesiac_clone"] - 1) equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) rolecounts["amnesiac"][0] = max(0, rolecounts["amnesiac"][0] - 1) rolecounts["amnesiac"][1] = max(0, rolecounts["amnesiac"][1] - 1) elif pr == "wolf": # This could potentially be caused by a cub, not necessarily amnesiac/clone # as such we use a different equiv_set to reflect this maybe_cub = True num_realwolves = sum([rolecounts[r][1] for r in var.WOLF_ROLES if r != "wolf cub"]) if rolecounts["wolf cub"][1] == 0 or num_realwolves > 0: maybe_cub = False if (var.HIDDEN_AMNESIAC or rolecounts["amnesiac"][1] == 0) and (var.HIDDEN_CLONE or rolecounts["clone"][1] == 0): # guaranteed to be cub equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) rolecounts["wolf cub"][0] = max(0, rolecounts["wolf cub"][0] - 1) rolecounts["wolf cub"][1] = max(0, rolecounts["wolf cub"][1] - 1) elif (var.HIDDEN_CLONE or rolecounts["clone"][1] == 0) and not maybe_cub: # guaranteed to be amnesiac equiv_sets["amnesiac_clone"] = max(0, equiv_sets["amnesiac_clone"] - 1) equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) rolecounts["amnesiac"][0] = max(0, rolecounts["amnesiac"][0] - 1) rolecounts["amnesiac"][1] = max(0, rolecounts["amnesiac"][1] - 1) elif (var.HIDDEN_AMNESIAC or rolecounts["amnesiac"][1] == 0) and not maybe_cub: # guaranteed to be clone equiv_sets["amnesiac_clone"] = max(0, equiv_sets["amnesiac_clone"] - 1) equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) rolecounts["clone"][0] = max(0, rolecounts["clone"][0] - 1) rolecounts["clone"][1] = max(0, rolecounts["clone"][1] - 1) else: # could be anything, how exciting! if maybe_cub: maxcount = max(0, equiv_sets["amnesiac_clone_cub"] - 1) else: maxcount = max(0, equiv_sets["amnesiac_clone"] - 1) rolecounts["amnesiac"][0] = max(0, rolecounts["amnesiac"][0] - 1) if rolecounts["amnesiac"][1] > maxcount: rolecounts["amnesiac"][1] = maxcount rolecounts["clone"][0] = max(0, rolecounts["clone"][0] - 1) if rolecounts["clone"][1] > maxcount: rolecounts["clone"][1] = maxcount if maybe_cub: rolecounts["wolf cub"][0] = max(0, rolecounts["wolf cub"][0] - 1) if rolecounts["wolf cub"][1] > maxcount: rolecounts["wolf cub"][1] = maxcount if maybe_cub: equiv_sets["amnesiac_clone_cub"] = maxcount equiv_sets["amnesiac_clone"] = min(equiv_sets["amnesiac_clone"], maxcount) else: equiv_sets["amnesiac_clone"] = maxcount equiv_sets["amnesiac_clone_cub"] = max(maxcount, equiv_sets["amnesiac_clone_cub"] - 1) elif not var.HIDDEN_AMNESIAC and (var.HIDDEN_CLONE or rolecounts["clone"][1] == 0): # guaranteed to be amnesiac overkilling as clone reports as clone equiv_sets["amnesiac_clone"] = max(0, equiv_sets["amnesiac_clone"] - 1) equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) rolecounts["amnesiac"][0] = max(0, rolecounts["amnesiac"][0] - 1) rolecounts["amnesiac"][1] = max(0, rolecounts["amnesiac"][1] - 1) elif not var.HIDDEN_CLONE and (var.HIDDEN_AMNESIAC or rolecounts["amnesiac"][1] == 0): # guaranteed to be clone overkilling as amnesiac reports as amnesiac equiv_sets["amnesiac_clone"] = max(0, equiv_sets["amnesiac_clone"] - 1) equiv_sets["amnesiac_clone_cub"] = max(0, equiv_sets["amnesiac_clone_cub"] - 1) rolecounts["clone"][0] = max(0, rolecounts["clone"][0] - 1) rolecounts["clone"][1] = max(0, rolecounts["clone"][1] - 1) else: # could be either maxcount = max(0, equiv_sets["amnesiac_clone"] - 1) rolecounts["amnesiac"][0] 
= max(0, rolecounts["amnesiac"][0] - 1) if rolecounts["amnesiac"][1] > maxcount: rolecounts["amnesiac"][1] = maxcount rolecounts["clone"][0] = max(0, rolecounts["clone"][0] - 1) if rolecounts["clone"][1] > maxcount: rolecounts["clone"][1] = maxcount equiv_sets["amnesiac_clone"] = maxcount equiv_sets["amnesiac_clone_cub"] = max(maxcount, equiv_sets["amnesiac_clone_cub"] - 1) # Step 5. Handle cub growing up. Bot does not send out a message for this, so we need # to puzzle it out ourselves. If there are no amnesiacs or clones # then we can deterministically figure out cubs growing up. Otherwise we don't know for # sure whether or not they grew up. num_realwolves = sum([rolecounts[r][1] for r in var.WOLF_ROLES if r != "wolf cub"]) if num_realwolves == 0: # no wolves means cubs may have turned, set the min cub and max wolf appropriately rolecounts["wolf"][1] += rolecounts["wolf cub"][1] if rolecounts["amnesiac"][1] == 0 and rolecounts["clone"][1] == 0: # we know for sure they grew up rolecounts["wolf"][0] += rolecounts["wolf cub"][0] rolecounts["wolf cub"][1] = 0 rolecounts["wolf cub"][0] = 0 # Finally, combine all of our rolecounts into a message, with the default role last order = [r for r in var.role_order() if r in rolecounts] if var.DEFAULT_ROLE in order: order.remove(var.DEFAULT_ROLE) order.append(var.DEFAULT_ROLE) first = rolecounts[order[0]] if first[0] == first[1] == 1: vb = "is" else: vb = "are" for role in order: count = rolecounts[role] if count[0] == count[1]: if count[0] > 1 or count[0] == 0: if count[0] == 0 and role not in start_roles: continue message.append("\u0002{0}\u0002 {1}".format(count[0] if count[0] else "\u0002no\u0002", var.plural(role))) else: message.append("\u0002{0}\u0002 {1}".format(count[0], role)) else: message.append("\u0002{0}-{1}\u0002 {2}".format(count[0], count[1], var.plural(role))) # Show everything mostly as-is; the only hidden information is which # role was turned into wolf due to alpha bite or lycanthropy totem. # Amnesiac and clone show which roles they turned into. Time lords # and VGs show individually instead of being lumped in the default role, # and traitor is still based on var.HIDDEN_TRAITOR. 
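# Distilled sketch of the bookkeeping the "default" stats type above relies
# on: every role carries a [min, max] count, and roles that are
# indistinguishable on death share an "equivalency set" budget capping their
# combined maximum. This toy version handles a single ambiguity only; the
# real code also threads immunizations, turned traitors and overkill through
# the same structures.
def kill_ambiguous(counts, equiv, roles):
    """Record one death that could have been any role in `roles`."""
    key = "_".join(sorted(roles))
    equiv[key] = max(0, equiv[key] - 1)
    for r in roles:
        counts[r][0] = max(0, counts[r][0] - 1)       # min definitely drops
        counts[r][1] = min(counts[r][1], equiv[key])  # max capped by budget

# counts = {"traitor": [1, 1], "villager": [3, 3]}
# equiv = {"traitor_villager": 4}
# kill_ambiguous(counts, equiv, ["traitor", "villager"])
# -> traitor [0, 1], villager [2, 3]: one of them died, we cannot tell which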
elif var.STATS_TYPE == "accurate": l1 = [k for k in var.ROLES.keys() if var.ROLES[k]] l2 = [k for k in var.ORIGINAL_ROLES.keys() if var.ORIGINAL_ROLES[k]] rs = set(l1+l2) rs = [role for role in var.role_order() if role in rs] # picky ordering: villager always last if var.DEFAULT_ROLE in rs: rs.remove(var.DEFAULT_ROLE) rs.append(var.DEFAULT_ROLE) bitten_roles = defaultdict(int) lycan_roles = defaultdict(int) for role in var.BITTEN_ROLES.values(): bitten_roles[role] += 1 for role in var.LYCAN_ROLES.values(): lycan_roles[role] += 1 vb = "are" for role in rs: # only show actual roles if role in var.TEMPLATE_RESTRICTIONS.keys(): continue count = len(var.ROLES[role]) if role == "traitor" and var.HIDDEN_TRAITOR: continue elif role == var.DEFAULT_ROLE: if var.HIDDEN_TRAITOR: count += len(var.ROLES["traitor"]) count += bitten_roles["traitor"] count += lycan_roles["traitor"] count += bitten_roles[var.DEFAULT_ROLE] count += lycan_roles[var.DEFAULT_ROLE] elif role == "wolf": count -= sum(bitten_roles.values()) count -= sum(lycan_roles.values()) # GAs turn into FAs, not wolves for bitten_roles # (but turn into wolves for lycan_roles) count += bitten_roles["guardian angel"] elif role == "fallen angel": count -= bitten_roles["guardian angel"] count += bitten_roles["fallen angel"] count += lycan_roles["fallen angel"] else: count += bitten_roles[role] count += lycan_roles[role] if role == rs[0]: if count == 1: vb = "is" else: vb = "are" if count != 1: if count == 0 and len(var.ORIGINAL_ROLES[role]) == 0: continue message.append("\u0002{0}\u0002 {1}".format(count if count else "\u0002no\u0002", var.plural(role))) else: message.append("\u0002{0}\u0002 {1}".format(count, role)) # Only show team affiliation, this may be different than what mystics # and wolf mystics are told since neutrals are split off. Determination # of what numbers are shown is the same as summing up counts in "accurate" elif var.STATS_TYPE == "team": wolfteam = 0 villagers = 0 neutral = 0 for role, players in var.ROLES.items(): if role in var.TEMPLATE_RESTRICTIONS.keys(): continue elif role in var.WOLFTEAM_ROLES: if role == "traitor" and var.HIDDEN_TRAITOR: villagers += len(players) else: wolfteam += len(players) elif role in var.TRUE_NEUTRAL_ROLES: neutral += len(players) else: villagers += len(players) for role in list(var.BITTEN_ROLES.values()) + list(var.LYCAN_ROLES.values()): wolfteam -= 1 if role in var.WOLFTEAM_ROLES: if role == "traitor" and var.HIDDEN_TRAITOR: villagers += 1 else: wolfteam += 1 elif role in var.TRUE_NEUTRAL_ROLES: neutral += 1 else: villagers += 1 message.append("\u0002{0}\u0002 {1}".format(wolfteam if wolfteam else "\u0002no\u0002", "wolf" if wolfteam == 1 else "wolves")) message.append("\u0002{0}\u0002 {1}".format(villagers if villagers else "\u0002no\u0002", "villager" if villagers == 1 else "villagers")) message.append("\u0002{0}\u0002 {1}".format(neutral if neutral else "\u0002no\u0002", "neutral player" if neutral == 1 else "neutral players")) vb = "is" if wolfteam == 1 else "are" stats_mssg = "{0}It is currently {4}. 
There {3} {1}, and {2}.".format(_nick, ", ".join(message[0:-1]), message[-1], vb, var.PHASE) if nick == chan: pm(cli, nick, stats_mssg) else: if nick in pl or var.PHASE == "join": cli.msg(chan, stats_mssg) else: cli.notice(nick, stats_mssg) def hurry_up(cli, gameid, change): if var.PHASE != "day": return if gameid: if gameid != var.DAY_ID: return chan = botconfig.CHANNEL if not change: cli.msg(chan, ("\u0002As the sun sinks inexorably toward the horizon, turning the lanky pine " + "trees into fire-edged silhouettes, the villagers are reminded that very little " + "time remains for them to reach a decision; if darkness falls before they have done " + "so, the majority will win the vote. No one will be lynched if there " + "are no votes or an even split.\u0002")) return var.DAY_ID = 0 pl = var.list_players() avail = len(pl) - len(var.WOUNDED) - len(var.ASLEEP) votesneeded = avail // 2 + 1 not_lynching = len(var.NO_LYNCH) found_dup = False maxfound = (0, "") votelist = copy.deepcopy(var.VOTES) for votee, voters in votelist.items(): numvotes = 0 for v in var.IMPATIENT: if v in pl and v not in voters and v != votee: voters.append(v) for v in voters: weight = 1 imp_count = var.IMPATIENT.count(v) pac_count = var.PACIFISTS.count(v) if pac_count > imp_count: weight = 0 # more pacifists than impatience totems elif imp_count == pac_count and v not in var.VOTES[votee]: weight = 0 # impatience and pacifist cancel each other out, so don't count impatience if v in var.ROLES["bureaucrat"] or v in var.INFLUENTIAL: # the two do not stack weight *= 2 numvotes += weight if numvotes > maxfound[0]: maxfound = (numvotes, votee) found_dup = False elif numvotes == maxfound[0]: found_dup = True if maxfound[0] > 0 and not found_dup: cli.msg(chan, "The sun sets.") chk_decision(cli, force = maxfound[1]) # Induce a lynch else: cli.msg(chan, ("As the sun sets, the villagers agree to "+ "retire to their beds and wait for morning.")) transition_night(cli) @cmd("fnight", admin_only=True) def fnight(cli, nick, chan, rest): """Forces the day to end and night to begin.""" if var.PHASE != "day": cli.notice(nick, "It is not daytime.") else: hurry_up(cli, 0, True) @cmd("fday", admin_only=True) def fday(cli, nick, chan, rest): """Forces the night to end and the next day to begin.""" if var.PHASE != "night": cli.notice(nick, "It is not nighttime.") else: transition_day(cli) # Specify force = "nick" to force nick to be lynched def chk_decision(cli, force = ""): with var.GRAVEYARD_LOCK: if var.PHASE != "day": return chan = botconfig.CHANNEL pl = var.list_players() avail = len(pl) - len(var.WOUNDED) - len(var.ASLEEP) votesneeded = avail // 2 + 1 not_lynching = var.NO_LYNCH[:] for p in var.PACIFISTS: if p in pl and p not in var.WOUNDED and p not in var.ASLEEP: not_lynching.append(p) # .remove() will only remove the first instance, which means this plays nicely with pacifism countering this for p in var.IMPATIENT: if p in not_lynching: not_lynching.remove(p) # remove duplicates not_lynching = set(not_lynching) # we only need 50%+ to not lynch, instead of an actual majority, because a tie would time out day anyway # don't check for ABSTAIN_ENABLED here since we may have a case where the majority of people have pacifism totems or something if len(not_lynching) >= math.ceil(avail / 2): for p in not_lynching: if p not in var.NO_LYNCH: cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 meekly votes not to lynch anyone today.".format(p)) cli.msg(botconfig.CHANNEL, "The villagers have agreed not to lynch anybody today.") var.ABSTAINED = True 
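# Standalone restatement of the per-voter weight rule used by the tally loops
# in hurry_up above and chk_decision below: pacifism totems cancel impatience
# totems one-for-one; a voter whose pacifism outnumbers their impatience (or
# merely matches it, when the vote is only implied by impatience) counts for
# nothing; bureaucrats and influential voters count double, and those two
# bonuses do not stack.
def vote_weight(impatience, pacifism, voted_explicitly, doubled):
    if pacifism > impatience:
        return 0
    if pacifism == impatience and not voted_explicitly:
        return 0   # the totems cancel, so the implied vote is void
    return 2 if doubled else 1

# vote_weight(1, 1, True, False)  == 1  (an explicit vote still counts)
# vote_weight(1, 1, False, False) == 0  (implied impatience vote cancelled)
# vote_weight(0, 0, True, True)   == 2  (bureaucrat / influential)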
transition_night(cli) return aftermessage = None votelist = copy.deepcopy(var.VOTES) for votee, voters in votelist.items(): impatient_voters = [] numvotes = 0 random.shuffle(var.IMPATIENT) for v in var.IMPATIENT: if v in pl and v not in voters and v != votee and v not in var.WOUNDED and v not in var.ASLEEP: # don't add them in if they have the same number or more of pacifism totems # this matters for desperation totem on the votee imp_count = var.IMPATIENT.count(v) pac_count = var.PACIFISTS.count(v) if pac_count >= imp_count: continue # yes, this means that one of the impatient people will get desperation totem'ed if they didn't # already !vote earlier. sucks to suck. >:) voters.append(v) impatient_voters.append(v) for v in voters[:]: weight = 1 imp_count = var.IMPATIENT.count(v) pac_count = var.PACIFISTS.count(v) if pac_count > imp_count: weight = 0 # more pacifists than impatience totems elif imp_count == pac_count and v not in var.VOTES[votee]: weight = 0 # impatience and pacifist cancel each other out, so don't count impatience if v in var.ROLES["bureaucrat"] or v in var.INFLUENTIAL: # the two do not stack weight *= 2 numvotes += weight if numvotes >= votesneeded or votee == force: for p in impatient_voters: cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 impatiently votes for \u0002{1}\u0002.".format(p, votee)) # roles that prevent any lynch from happening if votee in var.ROLES["mayor"] and votee not in var.REVEALED_MAYORS: lmsg = ("While being dragged to the gallows, \u0002{0}\u0002 reveals that they " + "are the \u0002mayor\u0002. The village agrees to let them live for now.").format(votee) var.REVEALED_MAYORS.append(votee) votee = None elif votee in var.REVEALED: role = var.get_role(votee) if role == "amnesiac": var.ROLES["amnesiac"].remove(votee) role = var.AMNESIAC_ROLES[votee] var.ROLES[role].append(votee) var.AMNESIACS.append(votee) var.FINAL_ROLES[votee] = role pm(cli, votee, "Your totem clears your amnesia and you now fully remember who you are!") # If wolfteam, don't bother giving list of wolves since night is about to start anyway # Existing wolves also know that someone just joined their team because revealing totem says what they are # If turncoat, set their initial starting side to "none" just in case game ends before they can set it themselves if role == "turncoat": var.TURNCOATS[votee] = ("none", -1) an = "n" if role.startswith(("a", "e", "i", "o", "u")) else "" lmsg = ("Before the rope is pulled, \u0002{0}\u0002's totem emits a brilliant flash of light. " + "When the villagers are able to see again, they discover that {0} has escaped! 
" + "The left-behind totem seems to have taken on the shape of a{1} \u0002{2}\u0002.").format(votee, an, role) votee = None else: # roles that end the game upon being lynched if votee in var.ROLES["fool"]: # ends game immediately, with fool as only winner lmsg = random.choice(var.LYNCH_MESSAGES).format(votee, "", var.get_reveal_role(votee)) cli.msg(botconfig.CHANNEL, lmsg) if chk_win(cli, winner="@" + votee): return # roles that eliminate other players upon being lynched # note that lovers, assassin, clone, and vengeful ghost are handled in del_player() since they trigger on more than just lynch if votee in var.DESPERATE: # Also kill the very last person to vote them, unless they voted themselves last in which case nobody else dies target = voters[-1] if target != votee: if var.ROLE_REVEAL in ("on", "team"): r1 = var.get_reveal_role(target) an1 = "n" if r1.startswith(("a", "e", "i", "o", "u")) else "" tmsg = ("As the noose is being fitted, \u0002{0}\u0002's totem emits a brilliant flash of light. " + "When the villagers are able to see again, they discover that \u0002{1}\u0002, " + "a{2} \u0002{3}\u0002, has fallen over dead.").format(votee, target, an1, r1) else: tmsg = ("As the noose is being fitted, \u0002{0}\u0002's totem emits a brilliant flash of light. " + "When the villagers are able to see again, they discover that \u0002{1}\u0002 " + "has fallen over dead.").format(votee, target) cli.msg(botconfig.CHANNEL, tmsg) del_player(cli, target, True, end_game = False, killer_role = "shaman") # do not end game just yet, we have more killin's to do! # Other if votee in var.ROLES["jester"]: var.JESTERS.append(votee) if var.ROLE_REVEAL in ("on", "team"): rrole = var.get_reveal_role(votee) an = "n" if rrole.startswith(("a", "e", "i", "o", "u")) else "" lmsg = random.choice(var.LYNCH_MESSAGES).format(votee, an, rrole) else: lmsg = random.choice(var.LYNCH_MESSAGES_NO_REVEAL).format(votee) cli.msg(botconfig.CHANNEL, lmsg) if aftermessage != None: cli.msg(botconfig.CHANNEL, aftermessage) if del_player(cli, votee, True, killer_role = "villager"): transition_night(cli) break @cmd("votes", pm=True, phases=("join", "day", "night")) def show_votes(cli, nick, chan, rest): """Displays the voting statistics.""" pl = var.list_players() if var.PHASE == "join": #get gamemode votes in a dict (key = mode, value = number of votes) gamemode_votes = {} for vote in var.GAMEMODE_VOTES.values(): gamemode_votes[vote] = gamemode_votes.get(vote, 0) + 1 votelist = [] majority = False for gamemode,num_votes in sorted(gamemode_votes.items(), key=lambda x: x[1], reverse=True): #bold the game mode if: we have the right number of players, another game mode doesn't already have the majority, and this gamemode can be picked randomly or has the majority if (len(pl) >= var.GAME_MODES[gamemode][1] and len(pl) <= var.GAME_MODES[gamemode][2] and (not majority or num_votes >= len(pl)/2) and (var.GAME_MODES[gamemode][3] > 0 or num_votes >= len(pl)/2)): votelist.append("\u0002{0}\u0002: {1}".format(gamemode, num_votes)) if num_votes >= len(pl)/2: majority = True else: votelist.append("{0}: {1}".format(gamemode, num_votes)) the_message = ", ".join(votelist) if len(pl) >= var.MIN_PLAYERS: the_message += "{0}Votes needed for a majority: {1}".format("; " if votelist else "", int(math.ceil(len(pl)/2))) elif var.PHASE == "night": cli.notice(nick, "Voting is only during the day.") return else: if (chan != nick and var.LAST_VOTES and var.VOTES_RATE_LIMIT and var.LAST_VOTES + timedelta(seconds=var.VOTES_RATE_LIMIT) > datetime.now()): 
cli.notice(nick, "This command is rate-limited. Please wait a while " "before using it again.") return _nick = nick + ": " if chan == nick: _nick = "" if chan != nick and nick in pl: var.LAST_VOTES = datetime.now() if not var.VOTES.values(): msg = _nick + "No votes yet." if nick in pl: var.LAST_VOTES = None # reset else: votelist = ["{0}: {1} ({2})".format(votee, len(var.VOTES[votee]), " ".join(var.VOTES[votee])) for votee in var.VOTES.keys()] msg = "{0}{1}".format(_nick, ", ".join(votelist)) if chan == nick: pm(cli, nick, msg) elif nick not in pl and var.PHASE not in ("none", "join"): cli.notice(nick, msg) else: cli.msg(chan, msg) pl = var.list_players() avail = len(pl) - len(var.WOUNDED) - len(var.ASLEEP) votesneeded = avail // 2 + 1 not_voting = len(var.NO_LYNCH) if not_voting == 1: plural = " has" else: plural = "s have" the_message = ("{0}\u0002{1}\u0002 players, \u0002{2}\u0002 votes " "required to lynch, \u0002{3}\u0002 players available to " "vote.").format(_nick, len(pl), votesneeded, avail) if var.ABSTAIN_ENABLED: the_message += " \u0002{0}\u0002 player{1} refrained from voting.".format(not_voting, plural) if chan == nick: pm(cli, nick, the_message) elif nick not in pl and var.PHASE != "join": cli.notice(nick, the_message) else: cli.msg(chan, the_message) def chk_traitor(cli): realwolves = var.WOLF_ROLES[:] realwolves.remove("wolf cub") if len(var.list_players(realwolves)) > 0: return # actual wolves still alive wcl = copy.copy(var.ROLES["wolf cub"]) ttl = copy.copy(var.ROLES["traitor"]) event = Event("chk_traitor", {}) if event.dispatch(cli, var, wcl, ttl): for wc in wcl: var.ROLES["wolf"].append(wc) var.ROLES["wolf cub"].remove(wc) var.FINAL_ROLES[wc] = "wolf" pm(cli, wc, "You have grown up into a wolf and vowed to take revenge for your dead parents!") debuglog(wc, "(wolf cub) GROW UP") if len(var.ROLES["wolf"]) == 0: for tt in ttl: var.ROLES["wolf"].append(tt) var.ROLES["traitor"].remove(tt) var.FINAL_ROLES[tt] = "wolf" if tt in var.ROLES["cursed villager"]: var.ROLES["cursed villager"].remove(tt) pm(cli, tt, "HOOOOOOOOOWL. You have become... a wolf!\n"+ "It is up to you to avenge your fallen leaders!") debuglog(tt, "(traitor) TURNING") if len(var.ROLES["wolf"]) > 0: var.TRAITOR_TURNED = True cli.msg(botconfig.CHANNEL, "\u0002The villagers, during their celebrations, are "+ "frightened as they hear a loud howl. The wolves are "+ "not gone!\u0002") def stop_game(cli, winner = "", abort = False): chan = botconfig.CHANNEL if abort: cli.msg(chan, "The role attribution failed 3 times. Game was canceled.") if var.DAY_START_TIME: now = datetime.now() td = now - var.DAY_START_TIME var.DAY_TIMEDELTA += td if var.NIGHT_START_TIME: now = datetime.now() td = now - var.NIGHT_START_TIME var.NIGHT_TIMEDELTA += td daymin, daysec = var.DAY_TIMEDELTA.seconds // 60, var.DAY_TIMEDELTA.seconds % 60 nitemin, nitesec = var.NIGHT_TIMEDELTA.seconds // 60, var.NIGHT_TIMEDELTA.seconds % 60 total = var.DAY_TIMEDELTA + var.NIGHT_TIMEDELTA tmin, tsec = total.seconds // 60, total.seconds % 60 gameend_msg = ("Game lasted \u0002{0:0>2}:{1:0>2}\u0002. " + "\u0002{2:0>2}:{3:0>2}\u0002 was day. " + "\u0002{4:0>2}:{5:0>2}\u0002 was night. 
").format(tmin, tsec, daymin, daysec, nitemin, nitesec) if not abort: cli.msg(chan, gameend_msg) roles_msg = [] origroles = {} #nick based list of original roles rolelist = copy.deepcopy(var.ORIGINAL_ROLES) for role, playerlist in var.ORIGINAL_ROLES.items(): if role in var.TEMPLATE_RESTRICTIONS.keys(): continue for p in playerlist: player = p #with (dced) still in if p.startswith("(dced)"): p = p[6:] # Show cubs and traitors as themselves even if they turned into wolf if p in var.FINAL_ROLES and var.FINAL_ROLES[p] != role and (var.FINAL_ROLES[p] != "wolf" or role not in ("wolf cub", "traitor")): origroles[p] = role rolelist[role].remove(player) rolelist[var.FINAL_ROLES[p]].append(p) prev = False for role in var.role_order(): if len(rolelist[role]) == 0: continue playersformatted = [] for p in rolelist[role]: if p.startswith("(dced)"): p = p[6:] if p in origroles and role not in var.TEMPLATE_RESTRICTIONS.keys(): playersformatted.append("\u0002{0}\u0002 ({1}{2})".format(p, "" if prev else "was ", origroles[p])) prev = True elif role == "amnesiac": playersformatted.append("\u0002{0}\u0002 (would be {1})".format(p, var.AMNESIAC_ROLES[p])) else: playersformatted.append("\u0002{0}\u0002".format(p)) if len(rolelist[role]) == 2: msg = "The {1} were {0[0]} and {0[1]}." roles_msg.append(msg.format(playersformatted, var.plural(role))) elif len(rolelist[role]) == 1: roles_msg.append("The {1} was {0[0]}.".format(playersformatted, role)) else: msg = "The {2} were {0}, and {1}." roles_msg.append(msg.format(", ".join(playersformatted[0:-1]), playersformatted[-1], var.plural(role))) message = "" count = 0 if not abort: done = {} lovers = [] for lover1, llist in var.ORIGINAL_LOVERS.items(): for lover2 in llist: # check if already said the pairing if (lover1 in done and lover2 in done[lover1]) or (lover2 in done and lover1 in done[lover2]): continue lovers.append("\u0002{0}\u0002/\u0002{1}\u0002".format(lover1, lover2)) if lover1 in done: done[lover1].append(lover2) else: done[lover1] = [lover2] if len(lovers) == 1 or len(lovers) == 2: roles_msg.append("The lovers were {0}.".format(" and ".join(lovers))) elif len(lovers) > 2: roles_msg.append("The lovers were {0}, and {1}".format(", ".join(lovers[0:-1]), lovers[-1])) cli.msg(chan, var.break_long_message(roles_msg)) # Only update if someone actually won, "" indicates everyone died or abnormal game stop if winner != "": plrl = {} winners = [] for role,ppl in var.ORIGINAL_ROLES.items(): if role in var.TEMPLATE_RESTRICTIONS.keys(): continue for x in ppl: if x != None: if x in var.FINAL_ROLES: plrl[x] = var.FINAL_ROLES[x] else: plrl[x] = role for plr, rol in plrl.items(): orol = rol # original role, since we overwrite rol in case of clone splr = plr # plr stripped of the (dced) bit at the front, since other dicts don't have that # TODO: figure out how player stats should work when var.DISABLE_ACCOUNTS is True; likely track by nick if plr.startswith("(dced)") and plr[6:] in var.DCED_PLAYERS.keys(): splr = plr[6:] if var.DISABLE_ACCOUNTS: acc = splr else: acc = var.DCED_PLAYERS[plr[6:]]["account"] elif plr in var.PLAYERS.keys(): if var.DISABLE_ACCOUNTS: acc = plr else: acc = var.PLAYERS[plr]["account"] else: acc = "*" #probably fjoin'd fake won = False iwon = False # determine if this player's team won if rol in var.WOLFTEAM_ROLES: # the player was wolf-aligned if winner == "wolves": won = True elif rol in var.TRUE_NEUTRAL_ROLES: # most true neutral roles never have a team win, only individual wins if winner == "monsters" and rol == "monster": won = True if winner 
== "pipers" and rol == "piper": won = True if rol == "turncoat" and splr in var.TURNCOATS and var.TURNCOATS[splr][0] != "none": won = (winner == var.TURNCOATS[splr][0]) elif rol in ("amnesiac", "vengeful ghost") and splr not in var.VENGEFUL_GHOSTS: if var.DEFAULT_ROLE == "villager" and winner == "villagers": won = True elif var.DEFAULT_ROLE == "cultist" and winner == "wolves": won = True elif winner == "villagers": won = True survived = var.list_players() if plr.startswith("(dced)"): # You get NOTHING! You LOSE! Good DAY, sir! won = False iwon = False elif splr in var.LOVERS and splr in survived and len([x for x in var.LOVERS[splr] if x in survived]) > 0: for lvr in var.LOVERS[splr]: if lvr not in survived: # cannot win with dead lover (if splr in survived and lvr is not, that means lvr idled out) continue lvrrol = "" #somehow lvrrol wasn't set and caused a crash once if lvr in plrl: lvrrol = plrl[lvr] if not winner.startswith("@") and winner not in ("monsters", "pipers"): iwon = True break elif winner.startswith("@") and winner == "@" + lvr and var.LOVER_WINS_WITH_FOOL: iwon = True break elif winner == "monsters" and lvrrol == "monster": iwon = True break elif winner == "pipers" and lvrrol == "piper": iwon = True break elif rol == "fool" and "@" + splr == winner: iwon = True elif rol == "monster" and splr in survived and winner == "monsters": iwon = True elif rol == "piper" and splr in survived and winner == "pipers": iwon = True elif rol == "crazed shaman" or rol == "clone": # For clone, this means they ended game while being clone and not some other role if splr in survived and not winner.startswith("@") and winner not in ("monsters", "pipers"): iwon = True elif rol == "vengeful ghost": if not winner.startswith("@") and winner not in ("monsters", "pipers"): if won and splr in survived: iwon = True elif splr in var.VENGEFUL_GHOSTS and var.VENGEFUL_GHOSTS[splr] == "villagers" and winner == "wolves": won = True iwon = True elif splr in var.VENGEFUL_GHOSTS and var.VENGEFUL_GHOSTS[splr] == "!villagers" and winner == "wolves": # Starts with ! 
if they were driven off by retribution totem won = True iwon = False elif splr in var.VENGEFUL_GHOSTS and var.VENGEFUL_GHOSTS[splr] == "wolves" and winner == "villagers": won = True iwon = True elif splr in var.VENGEFUL_GHOSTS and var.VENGEFUL_GHOSTS[splr] == "!wolves" and winner == "villagers": won = True iwon = False else: won = False iwon = False elif rol == "jester" and splr in var.JESTERS: iwon = True elif not iwon: iwon = won and splr in survived # survived, team won = individual win if acc != "*": var.update_role_stats(acc, orol, won, iwon) for role in var.TEMPLATE_RESTRICTIONS.keys(): if plr in var.ORIGINAL_ROLES[role]: var.update_role_stats(acc, role, won, iwon) if splr in var.LOVERS: var.update_role_stats(acc, "lover", won, iwon) if won or iwon: winners.append(splr) var.update_game_stats(var.CURRENT_GAMEMODE.name, len(survived) + len(var.DEAD), winner) # spit out the list of winners winners.sort() if len(winners) == 1: cli.msg(chan, "The winner is \u0002{0}\u0002.".format(winners[0])) elif len(winners) == 2: cli.msg(chan, "The winners are \u0002{0}\u0002 and \u0002{1}\u0002.".format(winners[0], winners[1])) elif len(winners) > 2: nicklist = ("\u0002" + x + "\u0002" for x in winners[0:-1]) cli.msg(chan, "The winners are {0}, and \u0002{1}\u0002.".format(", ".join(nicklist), winners[-1])) reset_modes_timers(cli) reset() # This must be after reset() if var.AFTER_FLASTGAME is not None: var.AFTER_FLASTGAME() var.AFTER_FLASTGAME = None if var.ADMIN_TO_PING: # It was an flastgame cli.msg(chan, "PING! " + var.ADMIN_TO_PING) var.ADMIN_TO_PING = None return True def chk_win(cli, end_game = True, winner = None): """ Returns True if someone won """ chan = botconfig.CHANNEL lpl = len(var.list_players()) if var.PHASE == "join": if lpl == 0: reset_modes_timers(cli) reset() # This must be after reset() if var.AFTER_FLASTGAME is not None: var.AFTER_FLASTGAME() var.AFTER_FLASTGAME = None if var.ADMIN_TO_PING: # It was an flastgame cli.msg(chan, "PING! " + var.ADMIN_TO_PING) var.ADMIN_TO_PING = None return True return False with var.GRAVEYARD_LOCK: if var.PHASE not in ("day", "night"): return False #some other thread already ended game probably lwolves = len(var.list_players(var.WOLFCHAT_ROLES)) cubs = len(var.ROLES["wolf cub"]) if "wolf cub" in var.ROLES else 0 lrealwolves = len(var.list_players(var.WOLF_ROLES)) - cubs monsters = len(var.ROLES["monster"]) if "monster" in var.ROLES else 0 traitors = len(var.ROLES["traitor"]) if "traitor" in var.ROLES else 0 lpipers = len(var.ROLES["piper"]) if "piper" in var.ROLES else 0 if var.PHASE == "day": for p in var.WOUNDED + var.ASLEEP: try: role = var.get_role(p) if role in var.WOLFCHAT_ROLES: lwolves -= 1 else: lpl -= 1 except KeyError: pass message = "" # fool won, chk_win was called from !lynch if winner and winner.startswith("@"): message = "Game over! The fool has been lynched, causing them to win." elif lpl < 1: message = "Game over! There are no players remaining. Nobody wins." winner = "none" elif var.PHASE == "day" and lpipers and len(var.list_players()) - lpipers == len(var.CHARMED - set(var.ROLES["piper"])): winner = "pipers" message = ("Game over! Everyone has fallen victim to the charms of the " + "piper{0}. The piper{0} lead{1} the villagers away from the village, " + "never to return...").format("s" if lpipers > 1 else "", "s" if lpipers == 1 else "") elif lrealwolves == 0 and traitors == 0 and cubs == 0: if monsters > 0: plural = "s" if monsters > 1 else "" message = ("Game over! All the wolves are dead! 
                winner = "monsters"
            else:
                message = ("Game over! All the wolves are dead! The villagers " +
                           "chop them up, BBQ them, and have a hearty meal.")
                winner = "villagers"
        elif lwolves == lpl / 2:
            if monsters > 0:
                plural = "s" if monsters > 1 else ""
                message = ("Game over! There are the same number of wolves as uninjured villagers. " +
                           "The wolves overpower the villagers but then get destroyed by the monster{0}, " +
                           "causing the monster{0} to win.").format(plural)
                winner = "monsters"
            else:
                message = ("Game over! There are the same number of wolves as " +
                           "uninjured villagers. The wolves overpower the villagers and win.")
                winner = "wolves"
        elif lwolves > lpl / 2:
            if monsters > 0:
                plural = "s" if monsters > 1 else ""
                message = ("Game over! There are more wolves than uninjured villagers. " +
                           "The wolves overpower the villagers but then get destroyed by the monster{0}, " +
                           "causing the monster{0} to win.").format(plural)
                winner = "monsters"
            else:
                message = ("Game over! There are more wolves than " +
                           "uninjured villagers. The wolves overpower the villagers and win.")
                winner = "wolves"
        elif lrealwolves == 0:
            chk_traitor(cli)
            return chk_win(cli, end_game)

        event = Event("chk_win", {"winner": winner, "message": message})
        event.dispatch(var, lpl, lwolves, lrealwolves)
        winner = event.data["winner"]
        message = event.data["message"]

        if winner is None:
            return False

        if end_game:
            players = []
            if winner == "monsters":
                for plr in var.ROLES["monster"]:
                    players.append("{0} ({1})".format(plr, var.get_role(plr)))
            elif winner == "wolves":
                for plr in var.list_players(var.WOLFTEAM_ROLES):
                    players.append("{0} ({1})".format(plr, var.get_role(plr)))
            elif winner == "villagers":
                vroles = (role for role in var.ROLES.keys() if var.ROLES[role] and role not in (var.WOLFTEAM_ROLES + var.TRUE_NEUTRAL_ROLES + list(var.TEMPLATE_RESTRICTIONS.keys())))
                for plr in var.list_players(vroles):
                    players.append("{0} ({1})".format(plr, var.get_role(plr)))
            elif winner == "pipers":
                for plr in var.ROLES["piper"]:
                    players.append("{0} ({1})".format(plr, var.get_role(plr)))
            debuglog("WIN:", winner)
            debuglog("PLAYERS:", ", ".join(players))
            cli.msg(chan, message)
            stop_game(cli, winner)
        return True

def del_player(cli, nick, forced_death = False, devoice = True, end_game = True, death_triggers = True, killer_role = "", deadlist = [], original = "", cmode = [], ismain = True):
    """
    Returns: False if one side won.
    arg: forced_death = True when lynched or when the seer/wolf both don't act
    """
    t = time.time()  # time

    var.LAST_STATS = None  # reset
    var.LAST_VOTES = None

    with var.GRAVEYARD_LOCK:
        if not var.GAME_ID or var.GAME_ID > t:
            # either game ended, or a new game has started.
            return False
        ret = True
        pl = var.list_players()
        for dead in deadlist:
            if dead in pl:
                pl.remove(dead)
        if nick != None and (nick == original or nick in pl):
            nickrole = var.get_role(nick)
            nicktpls = var.get_templates(nick)
            var.del_player(nick)
            if nick in pl:
                pl.remove(nick)
            # handle roles that trigger on death
            # clone happens regardless of death_triggers being true or not
            if var.PHASE in ("night", "day"):
                clones = copy.copy(var.ROLES["clone"])
                for clone in clones:
                    if clone in var.CLONED:
                        target = var.CLONED[clone]
                        if nick == target and clone in var.CLONED:
                            # clone is cloning nick, so clone becomes nick's role
                            # clone does NOT get any of nick's templates (gunner/assassin/etc.)
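                            # var.CLONED maps clone -> cloned target; the entry is
                            # removed here and the clone inherits the target's main
                            # role (templates are intentionally not copied).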
                            del var.CLONED[clone]
                            var.ROLES["clone"].remove(clone)
                            if nickrole == "amnesiac":
                                # clone gets the amnesiac's real role
                                sayrole = var.AMNESIAC_ROLES[nick]
                                var.FINAL_ROLES[clone] = sayrole
                                var.ROLES[sayrole].append(clone)
                            else:
                                var.ROLES[nickrole].append(clone)
                                var.FINAL_ROLES[clone] = nickrole
                                sayrole = nickrole
                            debuglog("{0} (clone) CLONE DEAD PLAYER: {1} ({2})".format(clone, target, sayrole))
                            # if cloning time lord or vengeful ghost, say they are villager instead
                            if sayrole == "time lord":
                                sayrole = "villager"
                            elif sayrole == "vengeful ghost":
                                sayrole = var.DEFAULT_ROLE
                            an = "n" if sayrole.startswith(("a", "e", "i", "o", "u")) else ""
                            pm(cli, clone, "You are now a{0} \u0002{1}\u0002.".format(an, sayrole))
                            # if a clone is cloning a clone, clone who the old clone cloned
                            if nickrole == "clone" and nick in var.CLONED:
                                if var.CLONED[nick] == clone:
                                    pm(cli, clone, "It appears that \u0002{0}\u0002 was cloning you, so you are now stuck as a clone forever. How sad.".format(nick))
                                else:
                                    var.CLONED[clone] = var.CLONED[nick]
                                    pm(cli, clone, "You will now be cloning \u0002{0}\u0002 if they die.".format(var.CLONED[clone]))
                                    debuglog("{0} (clone) CLONE: {1} ({2})".format(clone, var.CLONED[clone], var.get_role(var.CLONED[clone])))
                            elif nickrole in var.WOLFCHAT_ROLES:
                                wolves = var.list_players(var.WOLFCHAT_ROLES)
                                wolves.remove(clone)  # remove self from list
                                for wolf in wolves:
                                    pm(cli, wolf, "\u0002{0}\u0002 cloned \u0002{1}\u0002 and has now become a wolf!".format(clone, nick))
                                if var.PHASE == "day":
                                    random.shuffle(wolves)
                                    for i, wolf in enumerate(wolves):
                                        wolfrole = var.get_role(wolf)
                                        cursed = ""
                                        if wolf in var.ROLES["cursed villager"]:
                                            cursed = "cursed "
                                        wolves[i] = "\u0002{0}\u0002 ({1}{2})".format(wolf, cursed, wolfrole)
                                    if len(wolves):
                                        pm(cli, clone, "Wolves: " + ", ".join(wolves))
                                    else:
                                        pm(cli, clone, "There are no other wolves")
                            elif nickrole == "turncoat":
                                var.TURNCOATS[clone] = ("none", -1)

                if nickrole == "clone" and nick in var.CLONED:
                    del var.CLONED[nick]

            if death_triggers and var.PHASE in ("night", "day"):
                if nick in var.LOVERS:
                    others = copy.copy(var.LOVERS[nick])
                    del var.LOVERS[nick][:]
                    for other in others:
                        if other not in pl:
                            continue  # already died somehow
                        if nick not in var.LOVERS[other]:
                            continue
                        var.LOVERS[other].remove(nick)
                        if var.ROLE_REVEAL in ("on", "team"):
                            role = var.get_reveal_role(other)
                            an = "n" if role.startswith(("a", "e", "i", "o", "u")) else ""
                            message = ("Saddened by the loss of their lover, \u0002{0}\u0002, " +
                                       "a{1} \u0002{2}\u0002, commits suicide.").format(other, an, role)
                        else:
                            message = "Saddened by the loss of their lover, \u0002{0}\u0002 commits suicide.".format(other)
                        cli.msg(botconfig.CHANNEL, message)
                        debuglog("{0} ({1}) LOVE SUICIDE: {2} ({3})".format(other, var.get_role(other), nick, nickrole))
                        del_player(cli, other, True, end_game = False, killer_role = killer_role, deadlist = deadlist, original = original, ismain = False)
                        pl.remove(other)
                if "assassin" in nicktpls:
                    if nick in var.TARGETED:
                        target = var.TARGETED[nick]
                        del var.TARGETED[nick]
                        if target != None and target in pl:
                            if "totem" in var.ACTIVE_PROTECTIONS[target] and nickrole != "fallen angel":
                                var.ACTIVE_PROTECTIONS[target].remove("totem")
                                message = ("Before dying, \u0002{0}\u0002 quickly attempts to slit \u0002{1}\u0002's throat; " +
                                           "however, {1}'s totem emits a brilliant flash of light, causing the attempt to miss.").format(nick, target)
                                cli.msg(botconfig.CHANNEL, message)
                            elif "angel" in var.ACTIVE_PROTECTIONS[target] and nickrole != "fallen angel":
                                var.ACTIVE_PROTECTIONS[target].remove("angel")
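                                # Entries in ACTIVE_PROTECTIONS are single-use: each
                                # one is removed as soon as it absorbs an attempt, so
                                # stacked protections drain one per attack.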
message = ("Before dying, \u0002{0}\u0002 quickly attempts to slit \u0002{1}\u0002's throat; " + "however, a guardian angel was on duty and able to foil the attempt.").format(nick, target) cli.msg(botconfig.CHANNEL, message) elif "bodyguard" in var.ACTIVE_PROTECTIONS[target] and nickrole != "fallen angel": var.ACTIVE_PROTECTIONS[target].remove("bodyguard") for ga in var.ROLES["bodyguard"]: if var.GUARDED.get(ga) == target: message = ("Before dying, \u0002{0}\u0002 quickly attempts to slit \u0002{1}\u0002's throat; " + "however, \u0002{2}\u0002, a bodyguard, sacrificed their life to protect them.").format(nick, target, ga) cli.msg(botconfig.CHANNEL, message) del_player(cli, ga, True, end_game = False, killer_role = nickrole, deadlist = deadlist, original = original, ismain = False) pl.remove(ga) break else: if var.ROLE_REVEAL in ("on", "team"): role = var.get_reveal_role(target) an = "n" if role.startswith(("a", "e", "i", "o", "u")) else "" message = ("Before dying, \u0002{0}\u0002 quickly slits \u0002{1}\u0002's throat. " + "The village mourns the loss of a{2} \u0002{3}\u0002.").format(nick, target, an, role) else: message = "Before dying, \u0002{0}\u0002 quickly slits \u0002{1}\u0002's throat.".format(nick, target) cli.msg(botconfig.CHANNEL, message) debuglog("{0} ({1}) ASSASSINATE: {2} ({3})".format(nick, nickrole, target, var.get_role(target))) del_player(cli, target, True, end_game = False, killer_role = nickrole, deadlist = deadlist, original = original, ismain = False) pl.remove(target) if nickrole == "time lord": if "DAY_TIME_LIMIT" not in var.ORIGINAL_SETTINGS: var.ORIGINAL_SETTINGS["DAY_TIME_LIMIT"] = var.DAY_TIME_LIMIT if "DAY_TIME_WARN" not in var.ORIGINAL_SETTINGS: var.ORIGINAL_SETTINGS["DAY_TIME_WARN"] = var.DAY_TIME_WARN if "SHORT_DAY_LIMIT" not in var.ORIGINAL_SETTINGS: var.ORIGINAL_SETTINGS["SHORT_DAY_LIMIT"] = var.SHORT_DAY_LIMIT if "SHORT_DAY_WARN" not in var.ORIGINAL_SETTINGS: var.ORIGINAL_SETTINGS["SHORT_DAY_WARN"] = var.SHORT_DAY_WARN if "NIGHT_TIME_LIMIT" not in var.ORIGINAL_SETTINGS: var.ORIGINAL_SETTINGS["NIGHT_TIME_LIMIT"] = var.NIGHT_TIME_LIMIT if "NIGHT_TIME_WARN" not in var.ORIGINAL_SETTINGS: var.ORIGINAL_SETTINGS["NIGHT_TIME_WARN"] = var.NIGHT_TIME_WARN var.DAY_TIME_LIMIT = var.TIME_LORD_DAY_LIMIT var.DAY_TIME_WARN = var.TIME_LORD_DAY_WARN var.SHORT_DAY_LIMIT = var.TIME_LORD_DAY_LIMIT var.SHORT_DAY_WARN = var.TIME_LORD_DAY_WARN var.NIGHT_TIME_LIMIT = var.TIME_LORD_NIGHT_LIMIT var.NIGHT_TIME_WARN = var.TIME_LORD_NIGHT_WARN cli.msg(botconfig.CHANNEL, ("Tick tock! Since the time lord has died, " + "day will now only last {0} seconds and night will now only " + "last {1} seconds!").format(var.TIME_LORD_DAY_LIMIT, var.TIME_LORD_NIGHT_LIMIT)) if var.GAMEPHASE == "day" and timeleft_internal("day") > var.DAY_TIME_LIMIT and var.DAY_TIME_LIMIT > 0: if "day" in var.TIMERS: var.TIMERS["day"][0].cancel() t = threading.Timer(var.DAY_TIME_LIMIT, hurry_up, [cli, var.DAY_ID, True]) var.TIMERS["day"] = (t, time.time(), var.DAY_TIME_LIMIT) t.daemon = True t.start() # Don't duplicate warnings, e.g. 
                        if "day_warn" in var.TIMERS and var.TIMERS["day_warn"][0].isAlive():
                            var.TIMERS["day_warn"][0].cancel()
                            t = threading.Timer(var.DAY_TIME_WARN, hurry_up, [cli, var.DAY_ID, False])
                            var.TIMERS["day_warn"] = (t, time.time(), var.DAY_TIME_WARN)
                            t.daemon = True
                            t.start()
                    elif var.GAMEPHASE == "night" and timeleft_internal("night") > var.NIGHT_TIME_LIMIT and var.NIGHT_TIME_LIMIT > 0:
                        if "night" in var.TIMERS:
                            var.TIMERS["night"][0].cancel()
                        t = threading.Timer(var.NIGHT_TIME_LIMIT, hurry_up, [cli, var.NIGHT_ID, True])
                        var.TIMERS["night"] = (t, time.time(), var.NIGHT_TIME_LIMIT)
                        t.daemon = True
                        t.start()
                        # Don't duplicate warnings, e.g. only set the warn timer if a warning was not already given
                        if "night_warn" in var.TIMERS and var.TIMERS["night_warn"][0].isAlive():
                            var.TIMERS["night_warn"][0].cancel()
                            t = threading.Timer(var.NIGHT_TIME_WARN, hurry_up, [cli, var.NIGHT_ID, False])
                            var.TIMERS["night_warn"] = (t, time.time(), var.NIGHT_TIME_WARN)
                            t.daemon = True
                            t.start()
                    debuglog(nick, "(time lord) TRIGGER")

                if nickrole == "vengeful ghost":
                    if killer_role in var.WOLFTEAM_ROLES:
                        var.VENGEFUL_GHOSTS[nick] = "wolves"
                    else:
                        var.VENGEFUL_GHOSTS[nick] = "villagers"
                    pm(cli, nick, ("OOOooooOOOOooo! You are the \u0002vengeful ghost\u0002. It is now your job " +
                                   "to exact your revenge on the \u0002{0}\u0002 that killed you.").format(var.VENGEFUL_GHOSTS[nick]))
                    debuglog(nick, "(vengeful ghost) TRIGGER", var.VENGEFUL_GHOSTS[nick])

                if nickrole == "wolf cub":
                    var.ANGRY_WOLVES = True

                if nickrole in var.WOLF_ROLES:
                    if var.GAMEPHASE == "day":
                        var.ALPHA_ENABLED = True
                    for bitten, days in var.BITTEN.items():
                        brole = var.get_role(bitten)
                        if brole not in var.WOLF_ROLES and days > 0:
                            var.BITTEN[bitten] -= 1
                            pm(cli, bitten, ("Upon gazing at {0}'s lifeless body, you feel a sharp pang of regret and vengeance. " +
                                             "You quickly look away and the feelings subside...").format(nick))

                if nickrole == "mad scientist":
                    # kills the 2 players adjacent to them in the original players listing (in order of !joining)
                    # if those players are already dead, nothing happens
                    index = var.ALL_PLAYERS.index(nick)
                    targets = []
                    target1 = var.ALL_PLAYERS[index - 1]
                    target2 = var.ALL_PLAYERS[index + 1 if index < len(var.ALL_PLAYERS) - 1 else 0]
                    if len(var.ALL_PLAYERS) >= var.MAD_SCIENTIST_SKIPS_DEAD_PLAYERS:
                        # determine left player
                        i = index
                        while True:
                            i -= 1
                            if i < 0:
                                i = len(var.ALL_PLAYERS) - 1
                            if var.ALL_PLAYERS[i] in pl or var.ALL_PLAYERS[i] == nick:
                                target1 = var.ALL_PLAYERS[i]
                                break
                        # determine right player
                        i = index
                        while True:
                            i += 1
                            if i >= len(var.ALL_PLAYERS):
                                i = 0
                            if var.ALL_PLAYERS[i] in pl or var.ALL_PLAYERS[i] == nick:
                                target2 = var.ALL_PLAYERS[i]
                                break
                    if target1 in pl:
                        if target2 in pl and target1 != target2:
                            if var.ROLE_REVEAL in ("on", "team"):
                                r1 = var.get_reveal_role(target1)
                                an1 = "n" if r1.startswith(("a", "e", "i", "o", "u")) else ""
                                r2 = var.get_reveal_role(target2)
                                an2 = "n" if r2.startswith(("a", "e", "i", "o", "u")) else ""
                                tmsg = ("\u0002{0}\u0002 throws " +
                                        "a potent chemical concoction into the crowd. \u0002{1}\u0002, " +
                                        "a{2} \u0002{3}\u0002, and \u0002{4}\u0002, a{5} \u0002{6}\u0002, " +
                                        "get hit by the chemicals and die.").format(nick, target1, an1, r1, target2, an2, r2)
                            else:
                                tmsg = ("\u0002{0}\u0002 throws " +
                                        "a potent chemical concoction into the crowd. \u0002{1}\u0002 " +
                                        "and \u0002{2}\u0002 get hit by the chemicals and die.").format(nick, target1, target2)
\u0002{1}\u0002 " + "and \u0002{2}\u0002 get hit by the chemicals and die.").format(nick, target1, target2) cli.msg(botconfig.CHANNEL, tmsg) debuglog(nick, "(mad scientist) KILL: {0} ({1}) - {2} ({3})".format(target1, var.get_role(target1), target2, var.get_role(target2))) deadlist1 = copy.copy(deadlist) deadlist1.append(target2) deadlist2 = copy.copy(deadlist) deadlist2.append(target1) del_player(cli, target1, True, end_game = False, killer_role = "mad scientist", deadlist = deadlist1, original = original, ismain = False) del_player(cli, target2, True, end_game = False, killer_role = "mad scientist", deadlist = deadlist2, original = original, ismain = False) pl.remove(target1) pl.remove(target2) else: if var.ROLE_REVEAL in ("on", "team"): r1 = var.get_reveal_role(target1) an1 = "n" if r1.startswith(("a", "e", "i", "o", "u")) else "" tmsg = ("\u0002{0}\u0002 throws " + "a potent chemical concoction into the crowd. \u0002{1}\u0002, " + "a{2} \u0002{3}\u0002, gets hit by the chemicals and dies.").format(nick, target1, an1, r1) else: tmsg = ("\u0002{0}\u0002 throws " + "a potent chemical concoction into the crowd. \u0002{1}\u0002 " + "gets hit by the chemicals and dies.").format(nick, target1) cli.msg(botconfig.CHANNEL, tmsg) debuglog(nick, "(mad scientist) KILL: {0} ({1})".format(target1, var.get_role(target1))) del_player(cli, target1, True, end_game = False, killer_role = "mad scientist", deadlist = deadlist, original = original, ismain = False) pl.remove(target1) else: if target2 in pl: if var.ROLE_REVEAL in ("on", "team"): r2 = var.get_reveal_role(target2) an2 = "n" if r2.startswith(("a", "e", "i", "o", "u")) else "" tmsg = ("\u0002{0}\u0002 throws " + "a potent chemical concoction into the crowd. \u0002{1}\u0002, " + "a{2} \u0002{3}\u0002, gets hit by the chemicals and dies.").format(nick, target2, an2, r2) else: tmsg = ("\u0002{0}\u0002 throws " + "a potent chemical concoction into the crowd. \u0002{1}\u0002 " + "gets hit by the chemicals and dies.").format(nick, target2) cli.msg(botconfig.CHANNEL, tmsg) debuglog(nick, "(mad scientist) KILL: {0} ({1})".format(target2, var.get_role(target2))) del_player(cli, target2, True, end_game = False, killer_role = "mad scientist", deadlist = deadlist, original = original, ismain = False) pl.remove(target2) else: tmsg = ("\u0002{0}\u0002 throws " + "a potent chemical concoction into the crowd. Thankfully, " + "nobody seems to have gotten hit.").format(nick) cli.msg(botconfig.CHANNEL, tmsg) debuglog(nick, "(mad scientist) KILL FAIL") if devoice: cmode.append(("-v", nick)) if var.PHASE == "join": if nick in var.GAMEMODE_VOTES: del var.GAMEMODE_VOTES[nick] # Died during the joining process as a person if var.AUTO_TOGGLE_MODES and nick in var.USERS and var.USERS[nick]["moded"]: for newmode in var.USERS[nick]["moded"]: cmode.append(("+"+newmode, nick)) var.USERS[nick]["modes"].update(var.USERS[nick]["moded"]) var.USERS[nick]["moded"] = set() ret = not chk_win(cli) else: # Died during the game, so quiet! 
                if var.QUIET_DEAD_PLAYERS and not is_fake_nick(nick):
                    cmode.append(("+{0}".format(var.QUIET_MODE), var.QUIET_PREFIX+nick+"!*@*"))
                if nick not in var.DEAD:
                    var.DEAD.append(nick)
                ret = not chk_win(cli, end_game)
            if var.PHASE in ("night", "day") and ret:
                # remove the player from variables if they're in there
                for a,b in list(var.KILLS.items()):
                    for n in b:  # var.KILLS can have 2 kills in a list
                        if n == nick:
                            var.KILLS[a].remove(nick)
                    if a == nick or len(var.KILLS[a]) == 0:
                        del var.KILLS[a]
                for x in (var.OBSERVED, var.HVISITED, var.GUARDED, var.TARGETED, var.LASTGUARDED, var.LASTGIVEN, var.LASTHEXED, var.OTHER_KILLS, var.SHAMANS):
                    keys = list(x.keys())
                    for k in keys:
                        if k == nick:
                            del x[k]
                        elif x[k] == nick:
                            del x[k]
                if nick in var.DISCONNECTED:
                    del var.DISCONNECTED[nick]
            if var.PHASE == "night":
                # remove players from night variables
                # the dicts are handled above, these are the lists of who has acted which is used to determine whether night should end
                # if these aren't cleared properly night may end prematurely
                for x in (var.SEEN, var.PASSED, var.HUNTERS, var.HEXED):
                    if nick in x:
                        x.remove(nick)
            if var.PHASE == "day" and not forced_death and ret:
                # didn't die from lynching
                if nick in var.VOTES.keys():
                    del var.VOTES[nick]  # Delete other people's votes on the player
                for k in list(var.VOTES.keys()):
                    if nick in var.VOTES[k]:
                        var.VOTES[k].remove(nick)
                        if not var.VOTES[k]:  # no more votes on that person
                            del var.VOTES[k]
                        break  # can only vote once
                if nick in var.NO_LYNCH:
                    var.NO_LYNCH.remove(nick)
                if nick in var.WOUNDED:
                    var.WOUNDED.remove(nick)
                if nick in var.ASLEEP:
                    var.ASLEEP.remove(nick)
                chk_decision(cli)
            elif var.PHASE == "night" and ret:
                chk_nightdone(cli)
        if ismain:
            mass_mode(cli, cmode, [])
            del cmode[:]  # clear list
        return ret

def reaper(cli, gameid):
    # check to see if idlers need to be killed.
    var.IDLE_WARNED = set()
    var.IDLE_WARNED_PM = set()
    chan = botconfig.CHANNEL
    while gameid == var.GAME_ID:
        with var.GRAVEYARD_LOCK:
            # Terminate reaper when game ends
            if var.PHASE not in ("day", "night"):
                return
            if var.WARN_IDLE_TIME or var.PM_WARN_IDLE_TIME or var.KILL_IDLE_TIME:
                # only if enabled
                to_warn = []
                to_warn_pm = []
                to_kill = []
                for nick in var.list_players():
                    if is_fake_nick(nick):
                        continue
                    lst = var.LAST_SAID_TIME.get(nick, var.GAME_START_TIME)
                    tdiff = datetime.now() - lst
                    if var.WARN_IDLE_TIME and (tdiff > timedelta(seconds=var.WARN_IDLE_TIME) and
                                               nick not in var.IDLE_WARNED):
                        to_warn.append(nick)
                        var.IDLE_WARNED.add(nick)
                        var.LAST_SAID_TIME[nick] = (datetime.now() - timedelta(seconds=var.WARN_IDLE_TIME))  # Give them a chance
                    elif var.PM_WARN_IDLE_TIME and (tdiff > timedelta(seconds=var.PM_WARN_IDLE_TIME) and
                                                    nick not in var.IDLE_WARNED_PM):
                        to_warn_pm.append(nick)
                        var.IDLE_WARNED_PM.add(nick)
                        var.LAST_SAID_TIME[nick] = (datetime.now() - timedelta(seconds=var.PM_WARN_IDLE_TIME))
                    elif var.KILL_IDLE_TIME and (tdiff > timedelta(seconds=var.KILL_IDLE_TIME) and
                                                 (not var.WARN_IDLE_TIME or nick in var.IDLE_WARNED) and
                                                 (not var.PM_WARN_IDLE_TIME or nick in var.IDLE_WARNED_PM)):
                        to_kill.append(nick)
                    elif (tdiff < timedelta(seconds=var.WARN_IDLE_TIME) and
                          (nick in var.IDLE_WARNED or nick in var.IDLE_WARNED_PM)):
                        var.IDLE_WARNED.discard(nick)  # player saved themselves from death
                        var.IDLE_WARNED_PM.discard(nick)
                for nck in to_kill:
                    if nck not in var.list_players():
                        continue
                    if var.ROLE_REVEAL in ("on", "team"):
                        cli.msg(chan, ("\u0002{0}\u0002 didn't get out of bed for a very long " +
                                       "time and has been found dead. The survivors bury " +
                                       "the \u0002{1}\u0002's body.").format(nck, var.get_reveal_role(nck)))
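                    # ROLE_REVEAL "on"/"team" also reveals on idle-out; "off"
                    # keeps the role hidden even in death messages.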
                    else:
                        cli.msg(chan, ("\u0002{0}\u0002 didn't get out of bed for a very long " +
                                       "time and has been found dead.").format(nck))
                    for r,rlist in var.ORIGINAL_ROLES.items():
                        if nck in rlist:
                            var.ORIGINAL_ROLES[r].remove(nck)
                            var.ORIGINAL_ROLES[r].append("(dced)"+nck)
                    make_stasis(nck, var.IDLE_STASIS_PENALTY)
                    del_player(cli, nck, end_game = False, death_triggers = False)
                chk_win(cli)
                pl = var.list_players()
                x = [a for a in to_warn if a in pl]
                if x:
                    cli.msg(chan, ("{0}: \u0002You have been idling for a while. " +
                                   "Please say something soon or you " +
                                   "might be declared dead.\u0002").format(", ".join(x)))
                msg_targets = [p for p in to_warn_pm if p in pl]
                mass_privmsg(cli, msg_targets, ("\u0002You have been idling in {0} for a while. Please say something in {0} " +
                                                "or you will be declared dead.\u0002").format(chan), privmsg=True)
            for dcedplayer in list(var.DISCONNECTED.keys()):
                acc, cloak, timeofdc, what = var.DISCONNECTED[dcedplayer]
                if what in ("quit", "badnick") and (datetime.now() - timeofdc) > timedelta(seconds=var.QUIT_GRACE_TIME):
                    if var.get_role(dcedplayer) != "person" and var.ROLE_REVEAL in ("on", "team"):
                        cli.msg(chan, ("\u0002{0}\u0002 was mauled by wild animals and has died. It seems that " +
                                       "\u0002{1}\u0002 meat is tasty.").format(dcedplayer, var.get_reveal_role(dcedplayer)))
                    else:
                        cli.msg(chan, ("\u0002{0}\u0002 was mauled by wild animals and has died.").format(dcedplayer))
                    if var.PHASE != "join":
                        make_stasis(dcedplayer, var.PART_STASIS_PENALTY)
                    if not del_player(cli, dcedplayer, devoice = False, death_triggers = False):
                        return
                elif what == "part" and (datetime.now() - timeofdc) > timedelta(seconds=var.PART_GRACE_TIME):
                    if var.get_role(dcedplayer) != "person" and var.ROLE_REVEAL in ("on", "team"):
                        cli.msg(chan, ("\u0002{0}\u0002, a \u0002{1}\u0002, ate some poisonous berries " +
                                       "and has died.").format(dcedplayer, var.get_reveal_role(dcedplayer)))
                    else:
                        cli.msg(chan, ("\u0002{0}\u0002 ate some poisonous berries and has died.").format(dcedplayer))
                    if var.PHASE != "join":
                        make_stasis(dcedplayer, var.PART_STASIS_PENALTY)
                    if not del_player(cli, dcedplayer, devoice = False, death_triggers = False):
                        return
                elif what == "account" and (datetime.now() - timeofdc) > timedelta(seconds=var.ACC_GRACE_TIME):
                    if var.get_role(dcedplayer) != "person" and var.ROLE_REVEAL in ("on", "team"):
                        cli.msg(chan, ("\u0002{0}\u0002 has died of a heart attack. The villagers " +
                                       "couldn't save the \u0002{1}\u0002.").format(dcedplayer, var.get_reveal_role(dcedplayer)))
The villagers "+ "couldn't save the \u0002{1}\u0002.").format(dcedplayer, var.get_reveal_role(dcedplayer))) else: cli.msg(chan, ("\u0002{0}\u0002 has died of a heart attack.").format(dcedplayer)) if var.PHASE != "join": make_stasis(dcedplayer, var.ACC_STASIS_PENALTY) if not del_player(cli, dcedplayer, devoice = False, death_triggers = False): return time.sleep(10) @cmd("") # update last said def update_last_said(cli, nick, chan, rest): if chan != botconfig.CHANNEL: return if var.PHASE not in ("join", "none"): var.LAST_SAID_TIME[nick] = datetime.now() fullstring = "".join(rest) if var.CARE_BOLD and BOLD in fullstring: if var.KILL_BOLD: cli.send("KICK {0} {1} :Using bold is not allowed".format(botconfig.CHANNEL, nick)) else: cli.notice(nick, "Using bold in the channel is not allowed.") if var.CARE_COLOR and any(code in fullstring for code in ["\u0003", "\u0016", "\u001f" ]): if var.KILL_COLOR: cli.send("KICK {0} {1} :Using color is not allowed".format(botconfig.CHANNEL, nick)) else: cli.notice(nick, "Using color in the channel is not allowed.") @hook("join") def on_join(cli, raw_nick, chan, acc="*", rname=""): nick,m,u,cloak = parse_nick(raw_nick) if nick != botconfig.NICK: if nick not in var.USERS.keys(): var.USERS[nick] = dict(cloak=cloak,account=acc,inchan=chan == botconfig.CHANNEL,modes=set(),moded=set()) else: var.USERS[nick]["cloak"] = cloak var.USERS[nick]["account"] = acc if not var.USERS[nick]["inchan"]: # Will be True if the user joined the main channel, else False var.USERS[nick]["inchan"] = (chan == botconfig.CHANNEL) if chan != botconfig.CHANNEL: return with var.GRAVEYARD_LOCK: if nick in var.DISCONNECTED.keys(): clk = var.DISCONNECTED[nick][1] act = var.DISCONNECTED[nick][0] if (acc == act and not var.DISABLE_ACCOUNTS) or (cloak == clk and not var.ACCOUNTS_ONLY): cli.mode(chan, "+v", nick, nick+"!*@*") del var.DISCONNECTED[nick] var.LAST_SAID_TIME[nick] = datetime.now() cli.msg(chan, "\u0002{0}\u0002 has returned to the village.".format(nick)) for r,rlist in var.ORIGINAL_ROLES.items(): if "(dced)"+nick in rlist: rlist.remove("(dced)"+nick) rlist.append(nick) break if nick in var.DCED_PLAYERS.keys(): var.PLAYERS[nick] = var.DCED_PLAYERS.pop(nick) if nick == botconfig.NICK: var.OPPED = False if nick == "ChanServ" and not var.OPPED: cli.msg("ChanServ", "op " + chan) @cmd("goat", playing=True, phases=("day",)) def goat(cli, nick, chan, rest): """Use a goat to interact with anyone in the channel during the day.""" if var.GOATED and nick not in var.SPECIAL_ROLES["goat herder"]: cli.notice(nick, "This can only be done once per day.") return ul = list(var.USERS.keys()) ull = [x.lower() for x in ul] rest = re.split(" +",rest)[0] if not rest: cli.notice(nick, "Not enough parameters.") victim, _ = complete_match(rest.lower(), ull) if not victim: cli.notice(nick, "\u0002{0}\u0002 is not in this channel.".format(rest)) return victim = ul[ull.index(victim)] goatact = random.choice(("kicks", "headbutts")) cli.msg(chan, "\u0002{0}\u0002's goat walks by and {1} \u0002{2}\u0002.".format( nick, goatact, victim)) var.GOATED = True @cmd("fgoat", admin_only=True) def fgoat(cli, nick, chan, rest): """Forces a goat to interact with anyone or anything, without limitations.""" nick_ = rest.split(' ')[0].strip() ul = list(var.USERS.keys()) if nick_.lower() in (x.lower() for x in ul): togoat = nick_ else: togoat = rest goatact = random.choice(("kicks", "headbutts")) cli.msg(chan, "\u0002{0}\u0002's goat walks by and {1} \u0002{2}\u0002.".format(nick, goatact, togoat)) def rename_player(cli, prefix, nick): 
    chan = botconfig.CHANNEL

    if prefix == var.ADMIN_TO_PING:
        var.ADMIN_TO_PING = nick

    if prefix in var.list_players() and prefix not in var.DISCONNECTED.keys():
        r = var.ROLES[var.get_role(prefix)]
        r.append(nick)
        r.remove(prefix)
        tpls = var.get_templates(prefix)
        for t in tpls:
            var.ROLES[t].append(nick)
            var.ROLES[t].remove(prefix)

        if var.PHASE in ("night", "day"):
            # ALL_PLAYERS needs to keep its ordering for purposes of mad scientist
            var.ALL_PLAYERS[var.ALL_PLAYERS.index(prefix)] = nick
            for k,v in var.ORIGINAL_ROLES.items():
                if prefix in v:
                    var.ORIGINAL_ROLES[k].remove(prefix)
                    var.ORIGINAL_ROLES[k].append(nick)
            for k,v in list(var.PLAYERS.items()):
                if prefix == k:
                    var.PLAYERS[nick] = var.PLAYERS[k]
                    del var.PLAYERS[k]
            for dictvar in (var.HVISITED, var.OBSERVED, var.GUARDED, var.OTHER_KILLS, var.TARGETED,
                            var.CLONED, var.LASTGUARDED, var.LASTGIVEN, var.LASTHEXED, var.BITE_PREFERENCES, var.SHAMANS):
                kvp = []
                for a,b in dictvar.items():
                    if a == prefix:
                        a = nick
                    if b == prefix:
                        b = nick
                    kvp.append((a,b))
                dictvar.update(kvp)
                if prefix in dictvar.keys():
                    del dictvar[prefix]
            for dictvar in (var.VENGEFUL_GHOSTS, var.TOTEMS, var.FINAL_ROLES, var.BITTEN, var.GUNNERS,
                            var.TURNCOATS, var.DOCTORS, var.BITTEN_ROLES, var.LYCAN_ROLES, var.AMNESIAC_ROLES):
                if prefix in dictvar.keys():
                    dictvar[nick] = dictvar[prefix]
                    del dictvar[prefix]
            for dictvar in (var.KILLS, var.LOVERS, var.ORIGINAL_LOVERS):
                kvp = []
                for a,b in dictvar.items():
                    nl = []
                    for n in b:
                        if n == prefix:
                            n = nick
                        nl.append(n)
                    if a == prefix:
                        a = nick
                    kvp.append((a,nl))
                dictvar.update(kvp)
                if prefix in dictvar.keys():
                    del dictvar[prefix]
            for idx, tup in enumerate(var.EXCHANGED_ROLES):
                a, b = tup
                if a == prefix:
                    a = nick
                if b == prefix:
                    b = nick
                var.EXCHANGED_ROLES[idx] = (a, b)
            if prefix in var.SEEN:
                var.SEEN.remove(prefix)
                var.SEEN.append(nick)
            if prefix in var.HEXED:
                var.HEXED.remove(prefix)
                var.HEXED.append(nick)
            if prefix in var.ASLEEP:
                var.ASLEEP.remove(prefix)
                var.ASLEEP.append(nick)
            if prefix in var.DESPERATE:
                var.DESPERATE.remove(prefix)
                var.DESPERATE.append(nick)
            for k, d in list(var.DEATH_TOTEM):
                if k == prefix or d == prefix:
                    var.DEATH_TOTEM.remove((k, d))
                    nk = nick if k == prefix else k
                    nd = nick if d == prefix else d
                    var.DEATH_TOTEM.append((nk, nd))
            while prefix in var.PROTECTED:
                var.PROTECTED.remove(prefix)
                var.PROTECTED.append(nick)
            if prefix in var.REVEALED:
                var.REVEALED.remove(prefix)
                var.REVEALED.append(nick)
            if prefix in var.SILENCED:
                var.SILENCED.remove(prefix)
                var.SILENCED.append(nick)
            if prefix in var.TOBESILENCED:
                var.TOBESILENCED.remove(prefix)
                var.TOBESILENCED.append(nick)
            if prefix in var.REVEALED_MAYORS:
                var.REVEALED_MAYORS.remove(prefix)
                var.REVEALED_MAYORS.append(nick)
            if prefix in var.MATCHMAKERS:
                var.MATCHMAKERS.remove(prefix)
                var.MATCHMAKERS.append(nick)
            if prefix in var.HUNTERS:
                var.HUNTERS.remove(prefix)
                var.HUNTERS.append(nick)
            if prefix in var.PASSED:
                var.PASSED.remove(prefix)
                var.PASSED.append(nick)
            if prefix in var.JESTERS:
                var.JESTERS.remove(prefix)
                var.JESTERS.append(nick)
            if prefix in var.AMNESIACS:
                var.AMNESIACS.remove(prefix)
                var.AMNESIACS.append(nick)
            while prefix in var.IMPATIENT:
                var.IMPATIENT.remove(prefix)
                var.IMPATIENT.append(nick)
            while prefix in var.PACIFISTS:
                var.PACIFISTS.remove(prefix)
                var.PACIFISTS.append(nick)
            if prefix in var.INFLUENTIAL:
                var.INFLUENTIAL.remove(prefix)
                var.INFLUENTIAL.append(nick)
            if prefix in var.LYCANTHROPES:
                var.LYCANTHROPES.remove(prefix)
                var.LYCANTHROPES.append(nick)
            if prefix in var.TOBELYCANTHROPES:
                var.TOBELYCANTHROPES.remove(prefix)
                var.TOBELYCANTHROPES.append(nick)
            if prefix in var.LUCKY:
                var.LUCKY.remove(prefix)
                var.LUCKY.append(nick)
            if prefix in var.TOBELUCKY:
                var.TOBELUCKY.remove(prefix)
                var.TOBELUCKY.append(nick)
            if prefix in var.DISEASED:
                var.DISEASED.remove(prefix)
                var.DISEASED.append(nick)
            if prefix in var.TOBEDISEASED:
                var.TOBEDISEASED.remove(prefix)
                var.TOBEDISEASED.append(nick)
            if prefix in var.RETRIBUTION:
                var.RETRIBUTION.remove(prefix)
                var.RETRIBUTION.append(nick)
            if prefix in var.MISDIRECTED:
                var.MISDIRECTED.remove(prefix)
                var.MISDIRECTED.append(nick)
            if prefix in var.TOBEMISDIRECTED:
                var.TOBEMISDIRECTED.remove(prefix)
                var.TOBEMISDIRECTED.append(nick)
            if prefix in var.EXCHANGED:
                var.EXCHANGED.remove(prefix)
                var.EXCHANGED.append(nick)
            if prefix in var.IMMUNIZED:
                var.IMMUNIZED.remove(prefix)
                var.IMMUNIZED.add(nick)
            if prefix in var.CURED_LYCANS:
                var.CURED_LYCANS.remove(prefix)
                var.CURED_LYCANS.append(nick)
            if prefix in var.ALPHA_WOLVES:
                var.ALPHA_WOLVES.remove(prefix)
                var.ALPHA_WOLVES.append(nick)
            if prefix in var.CURSED:
                var.CURSED.remove(prefix)
                var.CURSED.append(nick)
            if prefix in var.CHARMERS:
                var.CHARMERS.remove(prefix)
                var.CHARMERS.add(nick)
            if prefix in var.CHARMED:
                var.CHARMED.remove(prefix)
                var.CHARMED.add(nick)
            if prefix in var.TOBECHARMED:
                var.TOBECHARMED.remove(prefix)
                var.TOBECHARMED.add(nick)
            with var.GRAVEYARD_LOCK:  # to be safe
                if prefix in var.LAST_SAID_TIME.keys():
                    var.LAST_SAID_TIME[nick] = var.LAST_SAID_TIME.pop(prefix)
                if prefix in getattr(var, "IDLE_WARNED", ()):
                    var.IDLE_WARNED.remove(prefix)
                    var.IDLE_WARNED.add(nick)
                if prefix in getattr(var, "IDLE_WARNED_PM", ()):
                    var.IDLE_WARNED_PM.remove(prefix)
                    var.IDLE_WARNED_PM.add(nick)
        if var.PHASE == "day":
            if prefix in var.WOUNDED:
                var.WOUNDED.remove(prefix)
                var.WOUNDED.append(nick)
            if prefix in var.INVESTIGATED:
                var.INVESTIGATED.remove(prefix)
                var.INVESTIGATED.append(nick)
            if prefix in var.VOTES:
                var.VOTES[nick] = var.VOTES.pop(prefix)
            for v in var.VOTES.values():
                if prefix in v:
                    v.remove(prefix)
                    v.append(nick)
        if var.PHASE == "join":
            if prefix in var.GAMEMODE_VOTES:
                var.GAMEMODE_VOTES[nick] = var.GAMEMODE_VOTES[prefix]
                del var.GAMEMODE_VOTES[prefix]

    # Check if he was DC'ed
    if var.PHASE in ("night", "day"):
        with var.GRAVEYARD_LOCK:
            if nick in var.DISCONNECTED.keys():
                clk = var.DISCONNECTED[nick][1]
                act = var.DISCONNECTED[nick][0]
                if nick in var.USERS:
                    cloak = var.USERS[nick]["cloak"]
                    acc = var.USERS[nick]["account"]
                else:
                    acc = None
                if not acc or acc == "*":
                    acc = None
                if (acc and acc == act) or (cloak == clk and not var.ACCOUNTS_ONLY):
                    cli.mode(chan, "+v", nick, nick+"!*@*")
                    del var.DISCONNECTED[nick]
                    var.LAST_SAID_TIME[nick] = datetime.now()
                    cli.msg(chan, "\u0002{0}\u0002 has returned to the village.".format(nick))
                    for r,rlist in var.ORIGINAL_ROLES.items():
                        if "(dced)"+nick in rlist:
                            rlist.remove("(dced)"+nick)
                            rlist.append(nick)
                    if nick in var.DCED_PLAYERS.keys():
                        var.PLAYERS[nick] = var.DCED_PLAYERS.pop(nick)

    if prefix in var.NO_LYNCH:
        var.NO_LYNCH.remove(prefix)
        var.NO_LYNCH.append(nick)

@hook("nick")
def on_nick(cli, oldnick, nick):
    prefix,u,m,cloak = parse_nick(oldnick)
    chan = botconfig.CHANNEL

    if (nick.startswith("Guest") or nick[0].isdigit() or (nick != "away" and "away" in nick.lower())) and nick not in var.DISCONNECTED.keys() and prefix in var.list_players():
        if var.PHASE != "join":
            cli.mode(chan, "-v", nick)
        leave(cli, "badnick", oldnick)
        # update var.USERS after so that leave() can keep track of new nick to use properly
        # return after doing this so that none of the game vars are updated with the bad nickname
        if prefix in var.USERS:
            var.USERS[nick] = var.USERS.pop(prefix)
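        # dict.pop() removes the old key and returns its record, so the user
        # entry moves to the new nick in a single step.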
        return

    if prefix in var.USERS:
        var.USERS[nick] = var.USERS.pop(prefix)
        if not var.USERS[nick]["inchan"]:
            return

    rename_player(cli, prefix, nick)

def leave(cli, what, nick, why=""):
    nick, _, _, cloak = parse_nick(nick)
    if nick in var.USERS:
        acc = var.USERS[nick]["account"]
        cloak = var.USERS[nick]["cloak"]
        if what == "quit" or (not what in ("account",) and why == botconfig.CHANNEL):
            var.USERS[nick]["inchan"] = False
    else:
        acc = None
    if not acc or acc == "*":
        acc = None

    if what in ("part", "kick") and why != botconfig.CHANNEL:
        return

    if why and why == botconfig.CHANGING_HOST_QUIT_MESSAGE:
        return
    if var.PHASE == "none":
        return
    # only mark living players as dced, unless they were kicked
    if nick in var.PLAYERS and (what == "kick" or nick in var.list_players()):
        # must prevent double entry in var.ORIGINAL_ROLES
        for r,rlist in var.ORIGINAL_ROLES.items():
            if nick in rlist:
                var.ORIGINAL_ROLES[r].remove(nick)
                var.ORIGINAL_ROLES[r].append("(dced)"+nick)
                break
        var.DCED_PLAYERS[nick] = var.PLAYERS.pop(nick)
    if nick not in var.list_players() or nick in var.DISCONNECTED.keys():
        return

    # the player who just quit was in the game
    killplayer = True

    population = ""

    if var.PHASE == "join":
        lpl = len(var.list_players()) - 1
        if lpl == 0:
            population = (" No more players remaining.")
        else:
            population = (" New player count: \u0002{0}\u0002").format(lpl)

    if what == "part" and (not var.PART_GRACE_TIME or var.PHASE == "join"):
        if var.get_role(nick) != "person" and var.ROLE_REVEAL in ("on", "team"):
            msg = ("\u0002{0}\u0002, a \u0002{1}\u0002, ate some poisonous berries and has " +
                   "died.{2}").format(nick, var.get_reveal_role(nick), population)
        else:
            msg = ("\u0002{0}\u0002 ate some poisonous berries and has died.{1}").format(nick, population)
    elif what in ("quit", "badnick") and (not var.QUIT_GRACE_TIME or var.PHASE == "join"):
        if var.get_role(nick) != "person" and var.ROLE_REVEAL in ("on", "team"):
            msg = ("\u0002{0}\u0002 was mauled by wild animals and has died. It seems that " +
                   "\u0002{1}\u0002 meat is tasty.{2}").format(nick, var.get_reveal_role(nick), population)
        else:
            msg = ("\u0002{0}\u0002 was mauled by wild animals and has died.{1}").format(nick, population)
    elif what == "account" and (not var.ACC_GRACE_TIME or var.PHASE == "join"):
        if var.get_role(nick) != "person" and var.ROLE_REVEAL in ("on", "team"):
            msg = ("\u0002{0}\u0002 fell into a river and was swept away. The villagers couldn't " +
                   "save the \u0002{1}\u0002.{2}").format(nick, var.get_reveal_role(nick), population)
        else:
            msg = ("\u0002{0}\u0002 fell into a river and was swept away.{1}").format(nick, population)
    elif what != "kick":
        msg = "\u0002{0}\u0002 has gone missing.".format(nick)
        killplayer = False
    else:
        if var.get_role(nick) != "person" and var.ROLE_REVEAL in ("on", "team"):
            msg = ("\u0002{0}\u0002 died due to falling off a cliff. The " +
                   "\u0002{1}\u0002 is lost to the ravine forever.{2}").format(nick, var.get_reveal_role(nick), population)
The "+ "\u0002{1}\u0002 is lost to the ravine forever.{2}").format(nick, var.get_reveal_role(nick), population) else: msg = ("\u0002{0}\u0002 died due to falling off a cliff.{1}").format(nick, population) make_stasis(nick, var.LEAVE_STASIS_PENALTY) cli.msg(botconfig.CHANNEL, msg) if what not in ("badnick", "account") and nick in var.USERS: var.USERS[nick]["modes"] = set() var.USERS[nick]["moded"] = set() if killplayer: del_player(cli, nick, death_triggers = False) else: var.DISCONNECTED[nick] = (acc, cloak, datetime.now(), what) #Functions decorated with hook do not parse the nick by default hook("part")(lambda cli, nick, *rest: leave(cli, "part", nick, rest[0])) hook("quit")(lambda cli, nick, *rest: leave(cli, "quit", nick, rest[0])) hook("kick")(lambda cli, nick, *rest: leave(cli, "kick", rest[1], rest[0])) @cmd("quit", "leave", playing=True, phases=("join", "day", "night")) def leave_game(cli, nick, chan, rest): """Quits the game.""" if var.PHASE == "join": lpl = len(var.list_players()) - 1 if lpl == 0: population = (" No more players remaining.") else: population = (" New player count: \u0002{0}\u0002").format(lpl) else: dur = int(var.START_QUIT_DELAY - (datetime.now() - var.GAME_START_TIME).total_seconds()) if var.START_QUIT_DELAY and dur > 0: cli.notice(nick, "The game already started! If you still want to quit, try again in {0} second{1}.".format(dur, "" if dur == 1 else "s")) return population = "" if var.get_role(nick) != "person" and var.ROLE_REVEAL in ("on", "team"): role = var.get_reveal_role(nick) an = "n" if role.startswith(("a", "e", "i", "o", "u")) else "" if var.DYNQUIT_DURING_GAME: lmsg = random.choice(var.QUIT_MESSAGES).format(nick, an, role) cli.msg(botconfig.CHANNEL, lmsg) else: cli.msg(botconfig.CHANNEL, ("\u0002{0}\u0002, a \u0002{1}\u0002, has died of an unknown disease.{2}").format(nick, role, population)) else: # DYNQUIT_DURING_GAME should not have any effect during the join phase, so only check if we aren't in that if var.PHASE != "join" and not var.DYNQUIT_DURING_GAME: cli.msg(botconfig.CHANNEL, ("\u0002{0}\u0002 has died of an unknown disease.{1}").format(nick, population)) else: lmsg = random.choice(var.QUIT_MESSAGES_NO_REVEAL).format(nick) + population cli.msg(botconfig.CHANNEL, lmsg) if var.PHASE != "join": for r, rlist in var.ORIGINAL_ROLES.items(): if nick in rlist: var.ORIGINAL_ROLES[r].remove(nick) var.ORIGINAL_ROLES[r].append("(dced)"+nick) make_stasis(nick, var.LEAVE_STASIS_PENALTY) if nick in var.PLAYERS: var.DCED_PLAYERS[nick] = var.PLAYERS.pop(nick) del_player(cli, nick, death_triggers = False) def begin_day(cli): chan = botconfig.CHANNEL # Reset nighttime variables var.GAMEPHASE = "day" var.KILLS = {} # nicknames of kill victims (wolves only) var.OTHER_KILLS = {} # other kill victims (hunter/vengeful ghost) var.KILLER = "" # nickname of who chose the victim var.SEEN = [] # list of seers/oracles/augurs that have had visions var.HEXED = [] # list of hags that have silenced others var.SHAMANS = {} # dict of shamans/crazed shamans that have acted and who got totems var.OBSERVED = {} # those whom werecrows/sorcerers have observed var.HVISITED = {} # those whom harlots have visited var.GUARDED = {} # this whom bodyguards/guardian angels have guarded var.PASSED = [] # list of certain roles that have opted not to act var.STARTED_DAY_PLAYERS = len(var.list_players()) var.SILENCED = copy.copy(var.TOBESILENCED) var.LYCANTHROPES = copy.copy(var.TOBELYCANTHROPES) var.LUCKY = copy.copy(var.TOBELUCKY) var.DISEASED = copy.copy(var.TOBEDISEASED) var.MISDIRECTED = 
    var.ACTIVE_PROTECTIONS = defaultdict(list)

    msg = ('The villagers must now vote for whom to lynch. ' +
           'Use "{0}lynch <nick>" to cast your vote. {1} votes ' +
           'are required to lynch.').format(botconfig.CMD_CHAR, len(var.list_players()) // 2 + 1)
    cli.msg(chan, msg)

    var.DAY_ID = time.time()
    if var.DAY_TIME_WARN > 0:
        if var.STARTED_DAY_PLAYERS <= var.SHORT_DAY_PLAYERS:
            t1 = threading.Timer(var.SHORT_DAY_WARN, hurry_up, [cli, var.DAY_ID, False])
            l = var.SHORT_DAY_WARN
        else:
            t1 = threading.Timer(var.DAY_TIME_WARN, hurry_up, [cli, var.DAY_ID, False])
            l = var.DAY_TIME_WARN
        var.TIMERS["day_warn"] = (t1, var.DAY_ID, l)
        t1.daemon = True
        t1.start()

    if var.DAY_TIME_LIMIT > 0:  # Time limit enabled
        if var.STARTED_DAY_PLAYERS <= var.SHORT_DAY_PLAYERS:
            t2 = threading.Timer(var.SHORT_DAY_LIMIT, hurry_up, [cli, var.DAY_ID, True])
            l = var.SHORT_DAY_LIMIT
        else:
            t2 = threading.Timer(var.DAY_TIME_LIMIT, hurry_up, [cli, var.DAY_ID, True])
            l = var.DAY_TIME_LIMIT
        var.TIMERS["day"] = (t2, var.DAY_ID, l)
        t2.daemon = True
        t2.start()

def night_warn(cli, gameid):
    if gameid != var.NIGHT_ID:
        return

    if var.PHASE == "day":
        return

    cli.msg(botconfig.CHANNEL, ("\u0002A few villagers awake early and notice it " +
                                "is still dark outside. " +
                                "The night is almost over and there are " +
                                "still whispers heard in the village.\u0002"))

def transition_day(cli, gameid=0):
    if gameid:
        if gameid != var.NIGHT_ID:
            return
    var.NIGHT_ID = 0

    if var.PHASE == "day":
        return

    var.PHASE = "day"
    var.GOATED = False
    chan = botconfig.CHANNEL

    pl = var.list_players()

    if not var.START_WITH_DAY or not var.FIRST_DAY:
        # bodyguard doesn't have restrictions, but being checked anyway since both GA and bodyguard use var.GUARDED
        if len(var.GUARDED.keys()) < len(var.ROLES["bodyguard"] + var.ROLES["guardian angel"]):
            for gangel in var.ROLES["guardian angel"]:
                if gangel not in var.GUARDED or var.GUARDED[gangel] is None:
                    var.LASTGUARDED[gangel] = None
        if len(var.HEXED) < len(var.ROLES["hag"]):
            for hag in var.ROLES["hag"]:
                if hag not in var.HEXED:
                    var.LASTHEXED[hag] = None

        # NOTE: Random assassin selection is further down, since if we're choosing at random we pick someone
        # that isn't going to be dying today, meaning we need to know who is dying first :)

        # Select a random target for vengeful ghost if they didn't kill
        wolves = var.list_players(var.WOLFTEAM_ROLES)
        villagers = var.list_players()
        for wolf in wolves:
            villagers.remove(wolf)
        for ghost, target in var.VENGEFUL_GHOSTS.items():
            if target[0] == "!" or ghost in var.SILENCED:
                continue
            if ghost not in var.OTHER_KILLS:
                if target == "wolves":
                    var.OTHER_KILLS[ghost] = random.choice(wolves)
                else:
                    var.OTHER_KILLS[ghost] = random.choice(villagers)

        # Select random totem recipients if shamans didn't act
        shamans = var.list_players(var.TOTEM_ORDER)
        for shaman in shamans:
            if shaman not in var.SHAMANS and shaman not in var.SILENCED:
                ps = pl[:]
                if var.LASTGIVEN.get(shaman) in ps:
                    ps.remove(var.LASTGIVEN.get(shaman))
                totem.func(cli, shaman, shaman, random.choice(ps), prefix="Because you forgot to give out your totem at night, you")
            else:
                var.LASTGIVEN[shaman] = None

        if var.FIRST_NIGHT:
            # Select a random target for clone if they didn't choose someone
            for clone in var.ROLES["clone"]:
                if clone not in var.CLONED:
                    ps = pl[:]
                    ps.remove(clone)
                    if len(ps) > 0:
                        target = random.choice(ps)
                        var.CLONED[clone] = target
                        pm(cli, clone, "Because you forgot to select someone to clone at night, you are now cloning \u0002{0}\u0002.".format(target))
            for mm in var.ROLES["matchmaker"]:
                if mm not in var.MATCHMAKERS:
                    lovers = random.sample(pl, 2)
                    choose.func(cli, mm, mm, lovers[0] + " " + lovers[1], sendmsg=False)
                    pm(cli, mm, "Because you forgot to choose lovers at night, two lovers have been selected for you.")

    # Reset daytime variables
    var.VOTES = {}
    var.INVESTIGATED = []
    var.WOUNDED = []
    var.DAY_START_TIME = datetime.now()
    var.NO_LYNCH = []
    var.DAY_COUNT += 1
    var.FIRST_DAY = (var.DAY_COUNT == 1)

    # Give out totems here
    for shaman, target in var.SHAMANS.items():
        totemname = var.TOTEMS[shaman]
        victim = choose_target(shaman, target)

        if totemname == "death":  # this totem stacks
            var.DEATH_TOTEM.append((shaman, victim))
        elif totemname == "protection":  # this totem stacks
            var.PROTECTED.append(victim)
        elif totemname == "revealing":
            if victim not in var.REVEALED:
                var.REVEALED.append(victim)
        elif totemname == "narcolepsy":
            if victim not in var.ASLEEP:
                var.ASLEEP.append(victim)
        elif totemname == "silence":
            if victim not in var.TOBESILENCED:
                var.TOBESILENCED.append(victim)
        elif totemname == "desperation":
            if victim not in var.DESPERATE:
                var.DESPERATE.append(victim)
        elif totemname == "impatience":  # this totem stacks
            var.IMPATIENT.append(victim)
        elif totemname == "pacifism":  # this totem stacks
            var.PACIFISTS.append(victim)
        elif totemname == "influence":
            if victim not in var.INFLUENTIAL:
                var.INFLUENTIAL.append(victim)
        elif totemname == "exchange":
            if victim not in var.EXCHANGED:
                var.EXCHANGED.append(victim)
        elif totemname == "lycanthropy":
            if victim not in var.TOBELYCANTHROPES:
                var.TOBELYCANTHROPES.append(victim)
        elif totemname == "luck":
            if victim not in var.TOBELUCKY:
                var.TOBELUCKY.append(victim)
        elif totemname == "pestilence":
            if victim not in var.TOBEDISEASED:
                var.TOBEDISEASED.append(victim)
        elif totemname == "retribution":
            if victim not in var.RETRIBUTION:
                var.RETRIBUTION.append(victim)
        elif totemname == "misdirection":
            if victim not in var.TOBEMISDIRECTED:
                var.TOBEMISDIRECTED.append(victim)
        else:
            debuglog("{0} {1}: INVALID TOTEM {2} TO {3}".format(shaman, var.get_role(shaman), totemname, victim))
        if target != victim:
            pm(cli, shaman, "It seems that \u0002{0}\u0002 now has the totem you gave out last night.".format(victim))
        var.LASTGIVEN[shaman] = victim
    havetotem = sorted(x for x in var.LASTGIVEN.values() if x)

    # Send out PMs to players who have been charmed
    for victim in var.TOBECHARMED:
        charmedlist = list(var.CHARMED | var.TOBECHARMED - {victim})
        message = ("You hear the sweet tones of a flute coming from outside your window... You "
                   "inexorably walk outside and find yourself in the village square. ")
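        # Operator precedence note: the set difference binds tighter than the
        # union, so the expression above reads as CHARMED | (TOBECHARMED - {victim}).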
You " "inexorably walk outside and find yourself in the village square. ") if len(charmedlist) <= 0: pm(cli, victim, message + "There are no other charmed players.") elif len(charmedlist) == 1: pm(cli, victim, message + "You find out that \u0002{0}\u0002 is also charmed!".format(charmedlist[0])) elif len(charmedlist) == 2: pm(cli, victim, message + ("You find out that \u0002{0}\u0002 and \u0002{1}\u0002 " "are also charmed!").format(charmedlist[0], charmedlist[1])) else: pm(cli, victim, message + ("You find out that \u0002{0}\u0002, and \u0002{1}\u0002 " "are also charmed!").format("\u0002, \u0002".join(charmedlist[:-1]), charmedlist[-1])) if var.TOBECHARMED: tobecharmedlist = list(var.TOBECHARMED) for victim in var.CHARMED: if len(tobecharmedlist) == 1: message = "\u0002{0}\u0002 is now charmed!".format(tobecharmedlist[0]) elif len(tobecharmedlist) == 2: message = "\u0002{0}\u0002 and \u0002{1}\u0002 are now charmed!".format(tobecharmedlist[0], tobecharmedlist[1]) else: message = "\u0002{0}\u0002, and \u0002{1}\u0002 are also charmed!".format( "\u0002, \u0002".join(tobecharmedlist[:-1]), tobecharmedlist[-1]) previouscharmed = var.CHARMED - {victim} if len(previouscharmed): pm(cli, victim, message + (" Previously charmed players: " "{0}").format("\u0002, \u0002".join(previouscharmed))) else: pm(cli, victim, message) var.CHARMED.update(var.TOBECHARMED) var.TOBECHARMED.clear() if var.START_WITH_DAY and var.FIRST_DAY: # TODO: need to message everyone their roles and give a short thing saying "it's daytime" # but this is good enough for now to prevent it from crashing begin_day(cli) return td = var.DAY_START_TIME - var.NIGHT_START_TIME var.NIGHT_START_TIME = None var.NIGHT_TIMEDELTA += td min, sec = td.seconds // 60, td.seconds % 60 # determine if we need to play the new wolf message due to bitten people new_wolf = False for (p, v) in var.BITTEN.items(): if v <= 0: new_wolf = True break found = defaultdict(int) for v in var.KILLS.values(): for p in v: found[p] += 1 maxc = 0 victims = [] bitten = [] killers = defaultdict(list) # dict of victim: list of killers (for retribution totem) bywolves = set() # wolves targeted, others may have as well (needed for harlot visit and maybe other things) onlybywolves = set() # wolves and nobody else targeted (needed for lycan) dups = [] for v, c in found.items(): if c > maxc: maxc = c dups = [v] elif c == maxc: dups.append(v) if maxc and dups: victim = random.choice(dups) victims.append(victim) bywolves.add(victim) onlybywolves.add(victim) killers[victim].append("@wolves") # special key to let us know to randomly select a wolf if victims and var.ANGRY_WOLVES: # they got a 2nd kill del found[victims[0]] maxc = 0 dups = [] for v, c in found.items(): if c > maxc: maxc = c dups = [v] elif c == maxc: dups.append(v) if maxc and dups: victim = random.choice(dups) victims.append(victim) bywolves.add(victim) onlybywolves.add(victim) killers[victim].append("@wolves") # special key to let us know to randomly select a wolf if len(var.ROLES["fallen angel"]) == 0: for monster in var.ROLES["monster"]: if monster in victims: victims.remove(monster) bywolves.discard(monster) onlybywolves.discard(monster) wolfghostvictims = [] for k, d in var.OTHER_KILLS.items(): victims.append(d) onlybywolves.discard(d) killers[d].append(k) if var.VENGEFUL_GHOSTS.get(k) == "villagers": wolfghostvictims.append(d) for k, d in var.DEATH_TOTEM: victims.append(d) onlybywolves.discard(d) killers[d].append(k) victims_set = set(victims) # remove duplicates victims_set.discard(None) # in the event 
    vappend = []
    # this keeps track of the protections active on each nick, stored in var since del_player needs to access it for sake of assassin
    protected = {}
    var.ACTIVE_PROTECTIONS = defaultdict(list)

    # Logic out stacked kills and protections. If we get down to 1 kill remaining that is valid and the victim is in bywolves,
    # we re-add them to onlybywolves to indicate that the other kill attempts were guarded against (and the wolf kill is what went through)
    # If protections >= kills, we keep track of which protection message to show (prot totem > GA > bodyguard)
    pl = var.list_players()
    for v in pl:
        if v in victims_set:
            numkills = victims.count(v)
            numtotems = var.PROTECTED.count(v)
            if numtotems >= numkills:
                protected[v] = "totem"
                if numtotems > numkills:
                    for i in range(0, numtotems - numkills):
                        var.ACTIVE_PROTECTIONS[v].append("totem")
            numkills -= numtotems
            for g in var.ROLES["guardian angel"]:
                if var.GUARDED.get(g) == v:
                    numkills -= 1
                    if numkills <= 0 and v not in protected:
                        protected[v] = "angel"
                    elif numkills <= 0:
                        var.ACTIVE_PROTECTIONS[v].append("angel")
            for g in var.ROLES["bodyguard"]:
                if var.GUARDED.get(g) == v:
                    numkills -= 1
                    if numkills <= 0 and v not in protected:
                        protected[v] = "bodyguard"
                    elif numkills <= 0:
                        var.ACTIVE_PROTECTIONS[v].append("bodyguard")
            numkills -= 1
            if numkills == 1 and v in bywolves:
                onlybywolves.add(v)
        else:
            # player wasn't targeted, but apply protections on them
            numtotems = var.PROTECTED.count(v)
            for i in range(0, numtotems):
                var.ACTIVE_PROTECTIONS[v].append("totem")
            for g in var.ROLES["guardian angel"]:
                if var.GUARDED.get(g) == v:
                    var.ACTIVE_PROTECTIONS[v].append("angel")
            for g in var.ROLES["bodyguard"]:
                if var.GUARDED.get(g) == v:
                    var.ACTIVE_PROTECTIONS[v].append("bodyguard")

    fallenkills = set()
    brokentotem = set()
    if len(var.ROLES["fallen angel"]) > 0:
        for p, t in list(protected.items()):
            if p in bywolves:
                for g in var.ROLES["guardian angel"]:
                    if var.GUARDED.get(g) == p and random.random() < var.FALLEN_ANGEL_KILLS_GUARDIAN_ANGEL_CHANCE:
                        if g in protected:
                            del protected[g]
                        bywolves.add(g)
                        victims.append(g)
                        fallenkills.add(g)
                        if g not in victims_set:
                            victims_set.add(g)
                            onlybywolves.add(g)
                for g in var.ROLES["bodyguard"]:
                    if var.GUARDED.get(g) == p:
                        if g in protected:
                            del protected[g]
                        bywolves.add(g)
                        victims.append(g)
                        fallenkills.add(g)
                        if g not in victims_set:
                            victims_set.add(g)
                            onlybywolves.add(g)
                # we'll never end up killing a shaman who gave out protection, but delete the totem since
                # story-wise it gets demolished at night by the FA
                while p in havetotem:
                    havetotem.remove(p)
                    brokentotem.add(p)
                if p in protected:
                    del protected[p]
                if p in var.ACTIVE_PROTECTIONS:
                    del var.ACTIVE_PROTECTIONS[p]

    if var.ALPHA_ENABLED:  # check for bites
        for (alpha, target) in var.BITE_PREFERENCES.items():
            # bite is now separate but some people may try to double up still, if bitten person is
            # also being killed by wolves, make the kill not apply
            # note that we cannot bite visiting harlots unless they are visiting a wolf,
            # and lycans/immunized people turn/die instead of being bitten, so keep the kills valid on those
            hvisit = var.HVISITED.get(target)
            if (target in onlybywolves and (target not in var.ROLES["harlot"] or not hvisit or var.get_role(hvisit) not in var.WOLFCHAT_ROLES or (hvisit in bywolves and hvisit not in protected))
                    and target not in var.ROLES["lycan"] and target not in var.LYCANTHROPES and target not in var.IMMUNIZED):
                victims.remove(target)
                bywolves.remove(target)
                onlybywolves.remove(target)
                killers[target].remove("@wolves")
                if target not in victims:
                    victims_set.discard(target)
            if target in victims_set:
                # bite was unsuccessful
                var.ALPHA_WOLVES.remove(alpha)
            else:
                var.BITTEN[target] = var.ALPHA_WOLF_NIGHTS
                bitten.append(target)
            if alpha in var.ALPHA_WOLVES:
                pm(cli, alpha, "You have bitten \u0002{0}\u0002.".format(target))
            else:
                pm(cli, alpha, "You tried to bite \u0002{0}\u0002, but it didn't work. Better luck next time!".format(target))
        var.BITE_PREFERENCES = {}

    victims = []
    # Ensures that special events play for bodyguard and harlot-visiting-victim so that kill can
    # be correctly attributed to wolves (for vengeful ghost lover), and that any gunner events
    # can play. Harlot visiting wolf doesn't play special events if they die via other means since
    # that assumes they die en route to the wolves (and thus don't shoot/give out gun/etc.)
    for v in victims_set:
        if v in var.ROLES["bodyguard"] and var.GUARDED.get(v) in victims_set:
            vappend.append(v)
        elif v in var.ROLES["harlot"] and var.HVISITED.get(v) in victims_set:
            vappend.append(v)
        else:
            victims.append(v)
    prevlen = var.MAX_PLAYERS + 10
    while len(vappend) > 0:
        if len(vappend) == prevlen:
            # have a circular dependency, try to break it by appending the next value
            v = vappend[0]
            vappend.remove(v)
            victims.append(v)
            continue
        prevlen = len(vappend)
        for v in copy.copy(vappend):
            if v in var.ROLES["bodyguard"] and var.GUARDED.get(v) not in vappend:
                vappend.remove(v)
                victims.append(v)
            elif v in var.ROLES["harlot"] and var.HVISITED.get(v) not in vappend:
                vappend.remove(v)
                victims.append(v)

    # If FA is killing through a guard, let them as well as the victim know so they don't
    # try to report the extra kills as a bug
    fallenmsg = set()
    if len(var.ROLES["fallen angel"]) > 0:
        for v in fallenkills:
            t = var.GUARDED.get(v)
            if v not in fallenmsg:
                fallenmsg.add(v)
                if v != t:
                    pm(cli, v, ("A fell wind starts blowing through the village and you catch the flurry of blackened wings out of the corner of your eye. " +
                                "No longer caring for \u0002{0}\u0002's safety, you attempt to get away before your own life is taken...").format(t))
                else:
                    pm(cli, v, "A fell wind blows through you and chills you to the bone. You no longer feel safe or protected...")
            if v != t and t not in fallenmsg:
                fallenmsg.add(t)
                pm(cli, t, "A fell wind blows through you and chills you to the bone. You no longer feel safe or protected...")
        # Also message GAs that don't die and their victims
        for g in var.ROLES["guardian angel"]:
            v = var.GUARDED.get(g)
            if v in bywolves and g not in fallenkills:
                if g not in fallenmsg:
                    fallenmsg.add(g)
                    if g != v:
                        pm(cli, g, ("A fell wind starts blowing through the village and you catch the flurry of blackened wings out of the corner of your eye. " +
                                    "No longer caring for \u0002{0}\u0002's safety, you attempt to get away before your own life is taken...").format(v))
                    else:
                        pm(cli, g, "A fell wind blows through you and chills you to the bone. You no longer feel safe or protected...")
                if g != v and v not in fallenmsg:
                    fallenmsg.add(g)
                    pm(cli, v, "A fell wind blows through you and chills you to the bone. You no longer feel safe or protected...")

    # Select a random target for assassin that isn't already going to die if they didn't target
    pl = var.list_players()
    for ass in var.ROLES["assassin"]:
        if ass not in var.TARGETED and ass not in var.SILENCED:
            ps = pl[:]
            ps.remove(ass)
            for victim in victims:
                if victim in ps:
                    ps.remove(victim)
            if len(ps) > 0:
                target = random.choice(ps)
                var.TARGETED[ass] = target
                pm(cli, ass, "Because you forgot to select a target at night, you are now targeting \u0002{0}\u0002.".format(target))

    message = [("Night lasted \u0002{0:0>2}:{1:0>2}\u0002. It is now daytime. " +
                "The villagers awake, thankful for surviving the night, " +
                "and search the village... ").format(min, sec)]

    # This needs to go down here since having them be their night value matters above
    var.ANGRY_WOLVES = False
    var.DISEASED_WOLVES = False
    var.ALPHA_ENABLED = False

    dead = []
    for crow, target in iter(var.OBSERVED.items()):
        if crow not in var.ROLES["werecrow"]:
            continue
        if ((target in list(var.HVISITED.keys()) and var.HVISITED[target]) or  # if var.HVISITED[target] is None, harlot visited self
                target in var.SEEN or target in var.SHAMANS or (target in list(var.GUARDED.keys()) and var.GUARDED[target])):
            pm(cli, crow, ("As the sun rises, you conclude that \u0002{0}\u0002 was not in " +
                           "bed all night, and you fly back to your house.").format(target))
        else:
            pm(cli, crow, ("As the sun rises, you conclude that \u0002{0}\u0002 was sleeping " +
                           "all night long, and you fly back to your house.").format(target))

    vlist = copy.copy(victims)
    novictmsg = True
    if new_wolf:
        message.append("A chilling howl was heard last night. It appears there is another werewolf in our midst!")
        var.EXTRA_WOLVES += 1
        novictmsg = False

    for victim in vlist:
        if victim in var.ROLES["harlot"] and var.HVISITED.get(victim) and victim not in dead and victim in onlybywolves:
            # alpha wolf can bite a harlot visiting another wolf, don't play a message in that case
            # kept as a nested if so that the other victim logic does not run
            if victim not in bitten:
                message.append("The wolves' selected victim was a harlot, who was not at home last night.")
                novictmsg = False
        elif protected.get(victim) == "totem":
            message.append(("\u0002{0}\u0002 was attacked last night, but their totem " +
                            "emitted a brilliant flash of light, blinding the attacker and " +
                            "allowing them to escape.").format(victim))
            novictmsg = False
        elif protected.get(victim) == "angel":
            message.append(("\u0002{0}\u0002 was attacked last night, but luckily, the guardian angel was on duty.").format(victim))
            novictmsg = False
        elif protected.get(victim) == "bodyguard":
            for bodyguard in var.ROLES["bodyguard"]:
                if var.GUARDED.get(bodyguard) == victim:
                    dead.append(bodyguard)
                    message.append(("\u0002{0}\u0002 sacrificed their life to guard that of another.").format(bodyguard))
                    novictmsg = False
                    break
        elif (victim in var.ROLES["lycan"] or victim in var.LYCANTHROPES) and victim in onlybywolves and victim not in var.IMMUNIZED:
            vrole = var.get_role(victim)
            if vrole not in var.WOLFCHAT_ROLES:
                message.append("A chilling howl was heard last night. It appears there is another werewolf in our midst!")
                var.EXTRA_WOLVES += 1
                pm(cli, victim, "HOOOOOOOOOWL. You have become... a wolf!")
a wolf!") var.LYCAN_ROLES[victim] = vrole var.ROLES[vrole].remove(victim) var.ROLES["wolf"].append(victim) var.FINAL_ROLES[victim] = "wolf" wolves = var.list_players(var.WOLFCHAT_ROLES) random.shuffle(wolves) wolves.remove(victim) # remove self from list for i, wolf in enumerate(wolves): pm(cli, wolf, "\u0002{0}\u0002 is now a wolf!".format(victim)) role = var.get_role(wolf) cursed = "" if wolf in var.ROLES["cursed villager"]: cursed = "cursed " wolves[i] = "\u0002{0}\u0002 ({1}{2})".format(wolf, cursed, role) pm(cli, victim, "Wolves: " + ", ".join(wolves)) novictmsg = False elif victim not in dead: # not already dead via some other means if victim in var.RETRIBUTION: loser = random.choice(killers[victim]) if loser == "@wolves": wolves = var.list_players(var.WOLF_ROLES) for crow in var.ROLES["werecrow"]: if crow in var.OBSERVED: wolves.remove(crow) loser = random.choice(wolves) if loser in var.VENGEFUL_GHOSTS.keys(): # mark ghost as being unable to kill any more var.VENGEFUL_GHOSTS[loser] = "!" + var.VENGEFUL_GHOSTS[loser] message.append(("\u0002{0}\u0002's totem emitted a brilliant flash of light last night. " + "It appears that \u0002{1}\u0002's spirit was driven away by the flash.").format(victim, loser)) else: dead.append(loser) if var.ROLE_REVEAL in ("on", "team"): role = var.get_reveal_role(loser) an = "n" if role.startswith(("a", "e", "i", "o", "u")) else "" message.append(("\u0002{0}\u0002's totem emitted a brilliant flash of light last night. " + "The dead body of \u0002{1}\u0002, a{2} \u0002{3}\u0002, was found at the scene.").format(victim, loser, an, role)) else: message.append(("\u0002{0}\u0002's totem emitted a brilliant flash of light last night. " + "The dead body of \u0002{1}\u0002 was found at the scene.").format(victim, loser)) if var.ROLE_REVEAL in ("on", "team"): role = var.get_reveal_role(victim) an = "n" if role.startswith(("a", "e", "i", "o", "u")) else "" message.append(("The dead body of \u0002{0}\u0002, a{1} \u0002{2}\u0002, is found. " + "Those remaining mourn the tragedy.").format(victim, an, role)) else: message.append(("The dead body of \u0002{0}\u0002 is found. 
" + "Those remaining mourn the tragedy.").format(victim)) dead.append(victim) if random.random() < var.GIF_CHANCE: message.append(random.choice( ["https://i.imgur.com/nO8rZ.gifv", "https://i.imgur.com/uGVfZ.gifv", "https://i.imgur.com/mUcM09n.gifv", "https://i.imgur.com/P7TEGyQ.gifv", "https://i.imgur.com/b8HAvjL.gifv", "https://i.imgur.com/PIIfL15.gifv"] )) elif random.random() < var.FORTUNE_CHANCE: try: out = subprocess.check_output(("fortune", "-s")) except OSError as e: if e.errno != 2: # No such file or directory (fortune is not installed) raise else: out = out.decode("utf-8", "replace") out = out.replace("\n", " ") out = re.sub(r"\s+", " ", out) # collapse whitespace out = out.strip() # remove surrounding whitespace message.append(out) # handle separately so it always happens no matter how victim dies, and so that we can account for bitten victims as well for victim in victims + bitten: if victim in dead and victim in var.HVISITED.values() and (victim in bywolves or victim in bitten): # victim was visited by some harlot and victim was attacked by wolves for hlt in var.HVISITED.keys(): if var.HVISITED[hlt] == victim and hlt not in bitten and hlt not in dead: message.append(("\u0002{0}\u0002, a \u0002harlot\u0002, made the unfortunate mistake of "+ "visiting the victim's house last night and is "+ "now dead.").format(hlt)) bywolves.add(hlt) onlybywolves.add(hlt) dead.append(hlt) if novictmsg and len(dead) == 0: message.append(random.choice(var.NO_VICTIMS_MESSAGES) + " All villagers, however, have survived.") for harlot in var.ROLES["harlot"]: if var.HVISITED.get(harlot) in var.list_players(var.WOLF_ROLES) and harlot not in dead and harlot not in bitten: message.append(("\u0002{0}\u0002, a \u0002harlot\u0002, made the unfortunate mistake of "+ "visiting a wolf's house last night and is "+ "now dead.").format(harlot)) bywolves.add(harlot) onlybywolves.add(harlot) dead.append(harlot) for bodyguard in var.ROLES["bodyguard"]: if var.GUARDED.get(bodyguard) in var.list_players(var.WOLF_ROLES) and bodyguard not in dead and bodyguard not in bitten: r = random.random() if r < var.BODYGUARD_DIES_CHANCE: bywolves.add(bodyguard) onlybywolves.add(bodyguard) if var.ROLE_REVEAL == "on": message.append(("\u0002{0}\u0002, a \u0002bodyguard\u0002, "+ "made the unfortunate mistake of guarding a wolf "+ "last night, and is now dead.").format(bodyguard)) else: # off and team message.append(("\u0002{0}\u0002 "+ "made the unfortunate mistake of guarding a wolf "+ "last night, and is now dead.").format(bodyguard)) dead.append(bodyguard) for gangel in var.ROLES["guardian angel"]: if var.GUARDED.get(gangel) in var.list_players(var.WOLF_ROLES) and gangel not in dead and gangel not in bitten: r = random.random() if r < var.GUARDIAN_ANGEL_DIES_CHANCE: bywolves.add(gangel) onlybywolves.add(gangel) if var.ROLE_REVEAL == "on": message.append(("\u0002{0}\u0002, a \u0002guardian angel\u0002, "+ "made the unfortunate mistake of guarding a wolf "+ "last night, and is now dead.").format(gangel)) else: # off and team message.append(("\u0002{0}\u0002 "+ "made the unfortunate mistake of guarding a wolf "+ "last night, and is now dead.").format(gangel)) dead.append(gangel) for victim in list(dead): if victim in var.GUNNERS.keys() and var.GUNNERS[victim] > 0 and victim in bywolves: if random.random() < var.GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE: # pick a random wolf to be shot, but don't kill off werecrows that observed killlist = [wolf for wolf in var.list_players(var.WOLF_ROLES) if wolf not in var.OBSERVED.keys() and wolf not in 
dead]
                if killlist:
                    deadwolf = random.choice(killlist)
                    if var.ROLE_REVEAL in ("on", "team"):
                        message.append(("Fortunately, \u0002{0}\u0002 had bullets and "+
                                        "\u0002{1}\u0002, a \u0002{2}\u0002, was shot dead.").format(victim, deadwolf, var.get_reveal_role(deadwolf)))
                    else:
                        message.append(("Fortunately, \u0002{0}\u0002 had bullets and "+
                                        "\u0002{1}\u0002 was shot dead.").format(victim, deadwolf))
                    dead.append(deadwolf)
                    var.GUNNERS[victim] -= 1  # deduct the used bullet

    for victim in dead:
        if victim in bywolves and victim in var.DISEASED:
            var.DISEASED_WOLVES = True

        if var.WOLF_STEALS_GUN and victim in bywolves and victim in var.GUNNERS.keys() and var.GUNNERS[victim] > 0:
            # victim has bullets
            try:
                looters = var.list_players(var.WOLFCHAT_ROLES)
                while len(looters) > 0:
                    guntaker = random.choice(looters)  # random looter
                    if guntaker not in dead:
                        break
                    else:
                        looters.remove(guntaker)
                if guntaker not in dead:
                    numbullets = var.GUNNERS[victim]
                    if guntaker not in var.WOLF_GUNNERS:
                        var.WOLF_GUNNERS[guntaker] = 0
                    var.WOLF_GUNNERS[guntaker] += 1  # transfer bullets to a wolf
                    mmsg = ("While searching {0}'s belongings, you found " +
                            "a gun loaded with 1 silver bullet! " +
                            "You may only use it during the day. " +
                            "If you shoot at a wolf, you will intentionally miss. " +
                            "If you shoot a villager, it is likely that they will be injured.")
                    mmsg = mmsg.format(victim)
                    pm(cli, guntaker, mmsg)
            except IndexError:
                pass  # no wolves to give gun to (they were all killed during night or something)
            var.GUNNERS[victim] = 0  # just in case

    cli.msg(chan, "\n".join(message))

    for chump in var.BITTEN.keys():
        if chump not in dead and var.get_role(chump) not in var.WOLF_ROLES:
            pm(cli, chump, get_bitten_message(chump))

    for chump in bitten:
        # compare roles, not bare nicks, as in the loop above (var.WOLF_ROLES holds role names)
        if chump not in dead and var.get_role(chump) not in var.WOLF_ROLES:
            if chump in var.ROLES["harlot"] and var.HVISITED.get(chump):
                pm(cli, chump, "While out visiting last night, you were overcome by a fierce-looking wolf and bitten on your neck...")
            else:
                pm(cli, chump, "You woke up today feeling light-headed, and you notice some odd bite marks on your neck...")

    for deadperson in dead:  # kill each player, but don't end the game if one group outnumbers another
        # take a shortcut for killer_role here since vengeful ghost only cares about team and not particular roles
        # this will have to be modified to track the actual killer if that behavior changes
        # we check if they have already been killed as well since del_player could do chain reactions and we want
        # to avoid sending duplicate messages.
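        # end_game=False defers the win check; chk_win(cli) runs once below, after every casualty is processed.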
if deadperson in var.list_players(): del_player(cli, deadperson, end_game = False, killer_role = "wolf" if deadperson in onlybywolves or deadperson in wolfghostvictims else "villager", deadlist = dead, original = deadperson) message = [] for player, tlist in itertools.groupby(havetotem): ntotems = len(list(tlist)) message.append("\u0002{0}\u0002 seem{1} to be in possession of {2} mysterious totem{3}...".format( player, "ed" if player in dead else "s", "a" if ntotems == 1 else "\u0002{0}\u0002".format(ntotems), "s" if ntotems > 1 else "")) for brokentotem in brokentotem: message.append("Broken totem pieces were found next to \u0002{0}\u0002's body...".format(brokentotem)) cli.msg(chan, "\n".join(message)) if chk_win(cli): # if after the last person is killed, one side wins, then actually end the game here return begin_day(cli) def chk_nightdone(cli): if var.PHASE != "night": return # TODO: alphabetize and/or arrange sensibly pl = var.list_players() actedcount = len(var.SEEN + list(var.HVISITED.keys()) + list(var.GUARDED.keys()) + list(var.KILLS.keys()) + list(var.OTHER_KILLS.keys()) + list(var.OBSERVED.keys()) + var.PASSED + var.HEXED + list(var.SHAMANS.keys()) + var.CURSED + list(var.CHARMERS)) nightroles = (var.ROLES["seer"] + var.ROLES["oracle"] + var.ROLES["harlot"] + var.ROLES["bodyguard"] + var.ROLES["guardian angel"] + var.ROLES["wolf"] + var.ROLES["werecrow"] + var.ROLES["alpha wolf"] + var.ROLES["sorcerer"] + var.ROLES["hunter"] + list(var.VENGEFUL_GHOSTS.keys()) + var.ROLES["hag"] + var.ROLES["shaman"] + var.ROLES["crazed shaman"] + var.ROLES["augur"] + var.ROLES["werekitten"] + var.ROLES["warlock"] + var.ROLES["piper"] + var.ROLES["wolf mystic"] + var.ROLES["fallen angel"]) if var.FIRST_NIGHT: actedcount += len(var.MATCHMAKERS + list(var.CLONED.keys())) nightroles += var.ROLES["matchmaker"] + var.ROLES["clone"] if var.DISEASED_WOLVES: nightroles = [p for p in nightroles if p not in var.list_players(("wolf", "alpha wolf", "werekitten", "wolf mystic", "fallen angel"))] elif var.ALPHA_ENABLED: # don't re-add alphas here since they can only kill *or* bite, not both actedcount += len([p for p in var.ALPHA_WOLVES if p in var.ROLES["alpha wolf"]]) for p in var.HUNTERS: # only remove one instance of their name if they have used hunter ability, in case they have templates # the OTHER_KILLS check ensures we only remove them if they acted in a *previous* night if p in var.ROLES["hunter"] and p not in var.OTHER_KILLS: nightroles.remove(p) # but remove all instances of their name if they are silenced nightroles = [p for p in nightroles if p not in var.SILENCED] # add in turncoats who should be able to act -- if they passed they're already in var.PASSED # but if they can act they're in var.TURNCOATS where the second tuple item is the current night # (if said tuple item is the previous night, then they are not allowed to act tonight) for tc, tu in var.TURNCOATS.items(): if tc not in pl: continue if tu[1] == var.NIGHT_COUNT: nightroles.append(tc) actedcount += 1 elif tu[1] < var.NIGHT_COUNT - 1: nightroles.append(tc) playercount = len(nightroles) + var.ACTED_EXTRA if var.PHASE == "night" and actedcount >= playercount: # check for assassins that have not yet targeted # must be handled separately because assassin only acts on nights when their target is dead # and silenced assassin shouldn't add to actedcount for ass in var.ROLES["assassin"]: if ass not in var.TARGETED and ass not in var.SILENCED: return if not var.DISEASED_WOLVES: # flatten var.KILLS kills = set() for ls in var.KILLS.values(): if 
not isinstance(ls, str):
                for v in ls:
                    kills.add(v)
            else:
                kills.add(ls)
            # check if wolves are actually agreeing
            # allow len(kills) == 0 through as that means that crow was dumb and observed instead
            # of killing, or alpha wolf was alone and chose to bite instead of kill
            if not var.ANGRY_WOLVES and len(kills) > 1:
                return
            elif var.ANGRY_WOLVES and (len(kills) == 1 or len(kills) > 2):
                return
        for x, t in var.TIMERS.items():
            t[0].cancel()
        var.TIMERS = {}
        if var.PHASE == "night":  # Double check
            transition_day(cli)

@cmd("nolynch", "nl", "novote", "nv", "abstain", "abs", playing=True, phases=("day",))
def no_lynch(cli, nick, chan, rest):
    """Allows you to abstain from voting for the day."""
    if chan == botconfig.CHANNEL:
        if not var.ABSTAIN_ENABLED:
            cli.notice(nick, "This command has been disabled.")
            return
        elif var.LIMIT_ABSTAIN and var.ABSTAINED:
            cli.notice(nick, "The village has already abstained once this game and may not do so again.")
            return
        elif var.LIMIT_ABSTAIN and var.FIRST_DAY:
            cli.notice(nick, "The village may not abstain on the first day.")
            return
        elif nick in var.WOUNDED:
            cli.msg(chan, "{0}: You are wounded and resting, thus you are unable to vote for the day.".format(nick))
            return
        candidates = var.VOTES.keys()
        for voter in list(candidates):
            if nick in var.VOTES[voter]:
                var.VOTES[voter].remove(nick)
                if not var.VOTES[voter]:
                    del var.VOTES[voter]
        if nick not in var.NO_LYNCH:
            var.NO_LYNCH.append(nick)
        cli.msg(chan, "\u0002{0}\u0002 votes not to lynch anyone today.".format(nick))
        chk_decision(cli)
        return

@cmd("lynch", playing=True, pm=True, phases=("day",))
def lynch(cli, nick, chan, rest):
    """Use this to vote for a candidate to be lynched."""
    if not rest:
        show_votes.caller(cli, nick, chan, rest)
        return
    if chan != botconfig.CHANNEL:
        return
    rest = re.split(" +", rest)[0].strip()
    if nick in var.WOUNDED:
        cli.msg(chan, ("{0}: You are wounded and resting, "+
                       "thus you are unable to vote for the day.").format(nick))
        return
    if nick in var.ASLEEP:
        pm(cli, nick, "As you place your vote, your totem emits a brilliant flash of light. " +
                      "After recovering, you notice that you are still in your bed. " +
                      "That entire sequence of events must have just been a dream...")
        return
    if nick in var.NO_LYNCH:
        var.NO_LYNCH.remove(nick)

    voted = get_victim(cli, nick, rest, True, var.SELF_LYNCH_ALLOWED)
    if not voted:
        return

    if not var.SELF_LYNCH_ALLOWED:
        if nick == voted:
            if nick in var.ROLES["fool"] or nick in var.ROLES["jester"]:
                cli.notice(nick, "You may not vote for yourself.")
            else:
                cli.notice(nick, "Please try to save yourself.")
            return

    lcandidates = list(var.VOTES.keys())
    for voters in lcandidates:  # remove previous vote
        if nick in var.VOTES[voters]:
            var.VOTES[voters].remove(nick)
            if not var.VOTES.get(voters) and voters != voted:
                del var.VOTES[voters]
            break
    if voted not in var.VOTES.keys():
        var.VOTES[voted] = [nick]
    else:
        var.VOTES[voted].append(nick)
    cli.msg(chan, ("\u0002{0}\u0002 votes for "+
                   "\u0002{1}\u0002.").format(nick, voted))
    var.LAST_VOTES = None  # reset

    chk_decision(cli)

def _adjacent_living_player(nick, pl):
    # dedup helper: walk the fixed seating order (var.ALL_PLAYERS) one step at a
    # time in a randomly chosen direction, wrapping around, until a living player
    # is found; this replaces four identical left/right walk loops
    i = var.ALL_PLAYERS.index(nick)
    step = -1 if random.randint(0, 1) == 0 else 1
    while True:
        i = (i + step) % len(var.ALL_PLAYERS)
        if var.ALL_PLAYERS[i] in pl:
            return var.ALL_PLAYERS[i]

# chooses a target given nick, taking luck totem/misdirection totem into effect
# returns the actual target
def choose_target(actor, nick):
    pl = var.list_players()
    if actor in var.MISDIRECTED:
        nick = _adjacent_living_player(nick, pl)
    if nick in var.LUCKY:
        nick = _adjacent_living_player(nick, pl)
    return nick

# returns true if a swap happened
# check for that to short-circuit the nightrole
def check_exchange(cli, actor, nick):
    # some roles can act on themselves, ignore this
    if actor == nick:
        return False
    if nick in var.EXCHANGED:
        var.EXCHANGED.remove(nick)
        actor_role = var.get_role(actor)
        nick_role = var.get_role(nick)

        # var.PASSED is used by many roles
        if actor in var.PASSED:
            var.PASSED.remove(actor)

        if actor_role == "amnesiac":
            actor_role = var.AMNESIAC_ROLES[actor]
            if nick in var.AMNESIAC_ROLES:
                var.AMNESIAC_ROLES[actor] = var.AMNESIAC_ROLES[nick]
                var.AMNESIAC_ROLES[nick] = actor_role
            else:
                del var.AMNESIAC_ROLES[actor]
                var.AMNESIAC_ROLES[nick] = actor_role
        elif actor_role == "clone":
            if actor in var.CLONED:
                actor_target = var.CLONED[actor]
                del var.CLONED[actor]
        elif actor_role in var.TOTEM_ORDER:
            actor_totem = var.TOTEMS[actor]
            del var.TOTEMS[actor]
            if actor in var.SHAMANS:
                del var.SHAMANS[actor]
            if actor in var.LASTGIVEN:
                del var.LASTGIVEN[actor]
        elif actor_role in ("wolf", "werekitten", "wolf mystic", "fallen angel"):
            if actor in var.KILLS:
                del var.KILLS[actor]
        elif actor_role == "hunter":
            if actor in var.OTHER_KILLS:
                var.ACTED_EXTRA += 1
            if actor in var.HUNTERS:
                var.HUNTERS.remove(actor)
        elif actor_role in ("bodyguard", "guardian angel"):
            if actor in var.GUARDED:
                pm(cli, var.GUARDED[actor], "Your protector seems to have disappeared...")
                del var.GUARDED[actor]
            if actor in var.LASTGUARDED:
                del var.LASTGUARDED[actor]
        elif actor_role in ("werecrow", "sorcerer"):
            if actor in var.OBSERVED:
                del var.OBSERVED[actor]
            if actor in var.KILLS:
                del var.KILLS[actor]
        elif actor_role == "harlot":
            if actor in var.HVISITED:
                if var.HVISITED[actor] is not None:
                    pm(cli, var.HVISITED[actor], "\u0002{0}\u0002 seems to have disappeared...".format(actor))
                del
var.HVISITED[actor] elif actor_role in ("seer", "oracle", "augur"): if actor in var.SEEN: var.SEEN.remove(actor) elif actor_role == "hag": if actor in var.LASTHEXED: if var.LASTHEXED[actor] in var.TOBESILENCED and actor in var.HEXED: var.TOBESILENCED.remove(var.LASTHEXED[actor]) del var.LASTHEXED[actor] if actor in var.HEXED: var.HEXED.remove(actor) elif actor_role == "doctor": if nick_role == "doctor": temp_immunizations = var.DOCTORS[actor] var.DOCTORS[actor] = var.DOCTORS[nick] var.DOCTORS[nick] = temp_immunizations else: var.DOCTORS[nick] = var.DOCTORS[actor] del var.DOCTORS[actor] elif actor_role == "alpha wolf": if actor in var.ALPHA_WOLVES: var.ALPHA_WOLVES.remove(actor) if actor in var.KILLS: del var.KILLS[actor] elif actor_role == "warlock": if actor in var.CURSED: var.CURSED.remove(actor) elif actor_role == "turncoat": del var.TURNCOATS[actor] # var.PASSED is used by many roles if nick in var.PASSED: var.PASSED.remove(nick) if nick_role == "amnesiac": if actor not in var.AMNESIAC_ROLES: nick_role = var.AMNESIAC_ROLES[nick] var.AMNESIAC_ROLES[actor] = nick_role del var.AMNESIAC_ROLES[nick] else: # we swapped amnesiac_roles earlier on, get our version back nick_role = var.AMNESIAC_ROLES[actor] elif nick_role == "clone": if nick in var.CLONED: nick_target = var.CLONED[nick] del var.CLONED[nick] elif nick_role in var.TOTEM_ORDER: nick_totem = var.TOTEMS[nick] del var.TOTEMS[nick] if nick in var.SHAMANS: del var.SHAMANS[nick] if nick in var.LASTGIVEN: del var.LASTGIVEN[nick] elif nick_role in ("wolf", "werekitten", "wolf mystic", "fallen angel"): if nick in var.KILLS: del var.KILLS[nick] elif nick_role == "hunter": if nick in var.OTHER_KILLS: var.ACTED_EXTRA += 1 if nick in var.HUNTERS: var.HUNTERS.remove(nick) elif nick_role in ("bodyguard", "guardian angel"): if nick in var.GUARDED: pm(cli, var.GUARDED[nick], "Your protector seems to have disappeared...") del var.GUARDED[nick] if nick in var.LASTGUARDED: del var.LASTGUARDED[nick] elif nick_role in ("werecrow", "sorcerer"): if nick in var.OBSERVED: del var.OBSERVED[nick] if nick in var.KILLS: del var.KILLS[nick] elif nick_role == "harlot": if nick in var.HVISITED: if var.HVISITED[nick] is not None: pm(cli, var.HVISITED[nick], "\u0002{0}\u0002 seems to have disappeared...".format(nick)) del var.HVISITED[nick] elif nick_role in ("seer", "oracle", "augur"): if nick in var.SEEN: var.SEEN.remove(nick) elif nick_role == "hag": if nick in var.LASTHEXED: if var.LASTHEXED[nick] in var.TOBESILENCED and nick in var.HEXED: var.TOBESILENCED.remove(var.LASTHEXED[nick]) del var.LASTHEXED[nick] if nick in var.HEXED: var.HEXED.remove(nick) elif nick_role == "doctor": # Both being doctors is handled above if actor_role != "doctor": var.DOCTORS[actor] = var.DOCTORS[nick] del var.DOCTORS[nick] elif nick_role == "alpha wolf": if nick in var.ALPHA_WOLVES: var.ALPHA_WOLVES.remove(nick) if nick in var.KILLS: del var.KILLS[nick] elif nick_role == "warlock": if nick in var.CURSED: var.CURSED.remove(nick) elif nick_role == "turncoat": del var.TURNCOATS[nick] var.FINAL_ROLES[actor] = nick_role var.FINAL_ROLES[nick] = actor_role var.ROLES[actor_role].append(nick) var.ROLES[actor_role].remove(actor) var.ROLES[nick_role].append(actor) var.ROLES[nick_role].remove(nick) if actor in var.BITTEN_ROLES.keys(): if nick in var.BITTEN_ROLES.keys(): var.BITTEN_ROLES[actor], var.BITTEN_ROLES[nick] = var.BITTEN_ROLES[nick], var.BITTEN_ROLES[actor] else: var.BITTEN_ROLES[nick] = var.BITTEN_ROLES[actor] del var.BITTEN_ROLES[actor] elif nick in var.BITTEN_ROLES.keys(): 
var.BITTEN_ROLES[actor] = var.BITTEN_ROLES[nick]
            del var.BITTEN_ROLES[nick]
        if actor in var.LYCAN_ROLES.keys():
            if nick in var.LYCAN_ROLES.keys():
                var.LYCAN_ROLES[actor], var.LYCAN_ROLES[nick] = var.LYCAN_ROLES[nick], var.LYCAN_ROLES[actor]
            else:
                var.LYCAN_ROLES[nick] = var.LYCAN_ROLES[actor]
                del var.LYCAN_ROLES[actor]
        elif nick in var.LYCAN_ROLES.keys():
            var.LYCAN_ROLES[actor] = var.LYCAN_ROLES[nick]
            del var.LYCAN_ROLES[nick]

        actor_rev_role = actor_role
        if actor_role == "vengeful ghost":
            actor_rev_role = var.DEFAULT_ROLE
        elif actor_role == "time lord":
            actor_rev_role = "villager"
        nick_rev_role = nick_role
        if nick_role == "vengeful ghost":
            nick_rev_role = var.DEFAULT_ROLE
        elif nick_role == "time lord":  # was actor_role, a copy-paste bug; time lord reveals as villager
            nick_rev_role = "villager"

        # don't say who, since misdirection/luck totem may have switched it
        # and this makes life far more interesting
        pm(cli, actor, "You have exchanged roles with someone! You are now a \u0002{0}\u0002.".format(nick_rev_role))
        pm(cli, nick, "You have exchanged roles with someone! You are now a \u0002{0}\u0002.".format(actor_rev_role))

        if nick_role == "clone":
            pm(cli, actor, "You are cloning \u0002{0}\u0002.".format(nick_target))
        elif nick_role in var.TOTEM_ORDER:
            if nick_role == "shaman":
                pm(cli, actor, "You have a \u0002{0}\u0002 totem.".format(nick_totem))
            var.TOTEMS[actor] = nick_totem
        elif nick_role == "mystic":
            numevil = len(var.list_players(var.WOLFTEAM_ROLES))
            pm(cli, actor, "There {0} \u0002{1}\u0002 evil villager{2} still alive.".format("are" if numevil != 1 else "is", numevil, "s" if numevil != 1 else ""))
        elif nick_role in var.WOLFCHAT_ROLES and actor_role not in var.WOLFCHAT_ROLES:
            pl = var.list_players()
            random.shuffle(pl)
            pl.remove(actor)  # remove self from list
            for i, player in enumerate(pl):
                prole = var.get_role(player)
                if prole in var.WOLFCHAT_ROLES:
                    cursed = ""
                    if player in var.ROLES["cursed villager"]:
                        cursed = "cursed "
                    pl[i] = "\u0002{0}\u0002 ({1}{2})".format(player, cursed, prole)
                    pm(cli, player, "\u0002{0}\u0002 and \u0002{1}\u0002 have exchanged roles!".format(nick, actor))
                elif player in var.ROLES["cursed villager"]:
                    pl[i] = player + " (cursed)"
            pm(cli, actor, "Players: " + ", ".join(pl))
            # actor's new role is nick_role here; the old actor_role can never be a
            # wolf role in this branch, so these checks must use nick_role
            if nick_role == "wolf mystic":
                # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals
                # (ps was undefined here; use the full player list)
                numvills = len(var.list_players()) - len(var.list_players(var.WOLFTEAM_ROLES)) - len(var.list_players(("villager", "vengeful ghost", "time lord", "amnesiac", "lycan"))) - len(var.list_players(var.TRUE_NEUTRAL_ROLES))
                pm(cli, actor, "There {0} \u0002{1}\u0002 special villager{2} still alive.".format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else ""))
            if var.DISEASED_WOLVES:
                pm(cli, actor, 'You are feeling ill tonight, and are unable to kill anyone.')
            elif var.ANGRY_WOLVES and nick_role in var.WOLF_ROLES and nick_role != "wolf cub":
                pm(cli, actor, 'You are \u0002angry\u0002 tonight, and may kill two targets by using "kill <nick1> and <nick2>".')
            if var.ALPHA_ENABLED and nick_role == "alpha wolf" and actor not in var.ALPHA_WOLVES:
                pm(cli, actor, ('You may use "bite <nick>" tonight in order to turn the wolves\' target into a wolf instead of killing them. ' +
                                'They will turn into a wolf in {0} night{1}.').format(var.ALPHA_WOLF_NIGHTS, 's' if var.ALPHA_WOLF_NIGHTS > 1 else ''))
        elif nick_role == "minion":
            wolves = var.list_players(var.WOLF_ROLES)
            random.shuffle(wolves)
            pm(cli, actor, "Wolves: " + ", ".join(wolves))
        elif nick_role == "turncoat":
            var.TURNCOATS[actor] = ("none", -1)

        if actor_role == "clone":
            pm(cli, nick, "You are cloning \u0002{0}\u0002.".format(actor_target))
        elif actor_role in var.TOTEM_ORDER:
            if actor_role == "shaman":
                pm(cli, nick, "You have a \u0002{0}\u0002 totem.".format(actor_totem))
            var.TOTEMS[nick] = actor_totem
        elif actor_role == "mystic":
            numevil = len(var.list_players(var.WOLFTEAM_ROLES))
            pm(cli, nick, "There {0} \u0002{1}\u0002 evil villager{2} still alive.".format("are" if numevil != 1 else "is", numevil, "s" if numevil != 1 else ""))
        elif actor_role in var.WOLFCHAT_ROLES and nick_role not in var.WOLFCHAT_ROLES:
            pl = var.list_players()
            random.shuffle(pl)
            pl.remove(nick)  # remove self from list
            for i, player in enumerate(pl):
                prole = var.get_role(player)
                if prole in var.WOLFCHAT_ROLES:
                    cursed = ""
                    if player in var.ROLES["cursed villager"]:
                        cursed = "cursed "
                    pl[i] = "\u0002{0}\u0002 ({1}{2})".format(player, cursed, prole)
                    pm(cli, player, "\u0002{0}\u0002 and \u0002{1}\u0002 have exchanged roles!".format(actor, nick))
                elif player in var.ROLES["cursed villager"]:
                    pl[i] = player + " (cursed)"
            pm(cli, nick, "Players: " + ", ".join(pl))
            # nick's new role is actor_role here; the old nick_role can never be a
            # wolf role in this branch, so these checks must use actor_role
            if actor_role == "wolf mystic":
                # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals
                # (ps was undefined here; use the full player list)
                numvills = len(var.list_players()) - len(var.list_players(var.WOLFTEAM_ROLES)) - len(var.list_players(("villager", "vengeful ghost", "time lord", "amnesiac", "lycan"))) - len(var.list_players(var.TRUE_NEUTRAL_ROLES))
                pm(cli, nick, "There {0} \u0002{1}\u0002 special villager{2} still alive.".format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else ""))
            if var.DISEASED_WOLVES:
                pm(cli, nick, 'You are feeling ill tonight, and are unable to kill anyone.')
            elif var.ANGRY_WOLVES and actor_role in ("wolf", "werecrow", "alpha wolf", "werekitten"):
                pm(cli, nick, 'You are \u0002angry\u0002 tonight, and may kill two targets by using "kill <nick1> and <nick2>".')
            if var.ALPHA_ENABLED and actor_role == "alpha wolf" and nick not in var.ALPHA_WOLVES:
                pm(cli, nick, ('You may use "bite <nick>" tonight in order to turn the wolves\' target into a wolf instead of killing them. 
' + 'They will turn into a wolf in {0} night{1}.').format(var.ALPHA_WOLF_NIGHTS, 's' if var.ALPHA_WOLF_NIGHTS > 1 else '')) elif actor_role == "minion": wolves = var.list_players(var.WOLF_ROLES) random.shuffle(wolves) pm(cli, nick, "Wolves: " + ", ".join(wolves)) elif actor_role == "turncoat": var.TURNCOATS[nick] = ("none", -1) var.EXCHANGED_ROLES.append((actor, nick)) return True return False @cmd("retract", "r", pm=True, playing=True, phases=("day", "night")) def retract(cli, nick, chan, rest): """Takes back your vote during the day (for whom to lynch).""" if chan not in (botconfig.CHANNEL, nick): return if chan == nick: # PM, use different code role = var.get_role(nick) if role not in var.WOLF_ROLES + ["hunter"] and nick not in var.VENGEFUL_GHOSTS.keys(): return if role == "wolf cub": return if var.PHASE != "night": return if role == "werecrow": # Check if already observed if var.OBSERVED.get(nick): pm(cli, nick, ("You have already transformed into a crow, and "+ "cannot turn back until day.")) return elif role == "hunter" and nick in var.HUNTERS and nick not in var.OTHER_KILLS.keys(): return if role in var.WOLF_ROLES and nick in var.KILLS.keys(): del var.KILLS[nick] pm(cli, nick, "You have retracted your kill.") wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has retracted their kill.".format(nick)) elif role not in var.WOLF_ROLES and nick in var.OTHER_KILLS.keys(): del var.OTHER_KILLS[nick] var.HUNTERS.remove(nick) pm(cli, nick, "You have retracted your kill.") elif role == "alpha wolf" and nick in var.BITE_PREFERENCES.keys(): del var.BITE_PREFERENCES[nick] var.ALPHA_WOLVES.remove(nick) pm(cli, nick, "You have decided not to bite anyone tonight.") wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has decided not to bite anyone tonight.".format(nick)) elif role == "alpha wolf" and var.ALPHA_ENABLED: pm(cli, nick, "You have not chosen to kill or bite anyone yet.") else: pm(cli, nick, "You have not chosen to kill anyone yet.") return if var.PHASE != "day": return if nick in var.NO_LYNCH: var.NO_LYNCH.remove(nick) cli.msg(chan, "\u0002{0}\u0002's vote was retracted.".format(nick)) var.LAST_VOTES = None # reset return candidates = var.VOTES.keys() for voter in list(candidates): if nick in var.VOTES[voter]: var.VOTES[voter].remove(nick) if not var.VOTES[voter]: del var.VOTES[voter] cli.msg(chan, "\u0002{0}\u0002's vote was retracted.".format(nick)) var.LAST_VOTES = None # reset break else: cli.notice(nick, "You haven't voted yet.") @cmd("shoot", playing=True, silenced=True, phases=("day",)) def shoot(cli, nick, chan, rest): """Use this to fire off a bullet at someone in the day if you have bullets.""" if chan != botconfig.CHANNEL: return if nick not in var.GUNNERS.keys() and nick not in var.WOLF_GUNNERS.keys(): cli.notice(nick, "You don't have a gun.") return elif ((nick in var.GUNNERS.keys() and not var.GUNNERS[nick]) or ((nick not in var.GUNNERS.keys() or not var.GUNNERS[nick]) and nick in var.WOLF_GUNNERS.keys() and not var.WOLF_GUNNERS[nick])): cli.notice(nick, "You don't have any more bullets.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], True) if not victim: return if victim == nick: cli.notice(nick, "You are holding it the wrong way.") return # get actual victim victim = choose_target(nick, victim) wolfshooter = nick in var.list_players(var.WOLFCHAT_ROLES) if wolfshooter and nick in var.WOLF_GUNNERS and 
var.WOLF_GUNNERS[nick]: var.WOLF_GUNNERS[nick] -= 1 else: var.GUNNERS[nick] -= 1 rand = random.random() if nick in var.ROLES["village drunk"]: chances = var.DRUNK_GUN_CHANCES elif nick in var.ROLES["sharpshooter"]: chances = var.SHARPSHOOTER_GUN_CHANCES else: chances = var.GUN_CHANCES wolfvictim = victim in var.list_players(var.WOLF_ROLES) realrole = var.get_role(victim) victimrole = var.get_reveal_role(victim) alwaysmiss = (realrole == "werekitten") if rand <= chances[0] and not (wolfshooter and wolfvictim) and not alwaysmiss: # didn't miss or suicide and it's not a wolf shooting another wolf cli.msg(chan, ("\u0002{0}\u0002 shoots \u0002{1}\u0002 with "+ "a silver bullet!").format(nick, victim)) an = "n" if victimrole.startswith(("a", "e", "i", "o", "u")) else "" if realrole in var.WOLF_ROLES: if var.ROLE_REVEAL == "on": cli.msg(chan, ("\u0002{0}\u0002 is a{1} \u0002{2}\u0002, and is dying from "+ "the silver bullet.").format(victim,an, victimrole)) else: # off and team cli.msg(chan, ("\u0002{0}\u0002 is a wolf, and is dying from "+ "the silver bullet.").format(victim)) if not del_player(cli, victim, killer_role = var.get_role(nick)): return elif random.random() <= chances[3]: accident = "accidentally " if nick in var.ROLES["sharpshooter"]: accident = "" # it's an accident if the sharpshooter DOESN'T headshot :P cli.msg(chan, ("\u0002{0}\u0002 is not a wolf "+ "but was {1}fatally injured.").format(victim, accident)) if var.ROLE_REVEAL in ("on", "team"): cli.msg(chan, "The village has sacrificed a{0} \u0002{1}\u0002.".format(an, victimrole)) if not del_player(cli, victim, killer_role = var.get_role(nick)): return else: cli.msg(chan, ("\u0002{0}\u0002 is a villager and was injured. Luckily "+ "the injury is minor and will heal after a day of "+ "rest.").format(victim)) if victim not in var.WOUNDED: var.WOUNDED.append(victim) lcandidates = list(var.VOTES.keys()) for cand in lcandidates: # remove previous vote if victim in var.VOTES[cand]: var.VOTES[cand].remove(victim) if not var.VOTES.get(cand): del var.VOTES[cand] break chk_decision(cli) chk_win(cli) elif rand <= chances[0] + chances[1]: cli.msg(chan, "\u0002{0}\u0002 is a lousy shooter and missed!".format(nick)) else: if var.ROLE_REVEAL in ("on", "team"): cli.msg(chan, ("Oh no! \u0002{0}\u0002's gun was poorly maintained and has exploded! "+ "The village mourns a gunner-\u0002{1}\u0002.").format(nick, var.get_reveal_role(nick))) else: cli.msg(chan, ("Oh no! \u0002{0}\u0002's gun was poorly maintained and has exploded!").format(nick)) if not del_player(cli, nick, killer_role = "villager"): # blame explosion on villager's shoddy gun construction or something return # Someone won. @cmd("kill", chan=False, pm=True, phases=("night",)) def kill(cli, nick, chan, rest): """Kill a player. Behaviour varies depending on your role.""" if (nick not in var.VENGEFUL_GHOSTS.keys() and nick not in var.list_players()) or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return try: role = var.get_role(nick) except KeyError: role = None wolfroles = list(var.WOLF_ROLES) wolfroles.remove("wolf cub") if role in var.WOLFCHAT_ROLES and role not in wolfroles: return # they do this a lot. 
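    # (the silent return above is intentional: wolfchat members without a kill power,
    # such as sorcerers, hags and warlocks, habitually try to use this command)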
if role not in wolfroles + ["hunter"] and nick not in var.VENGEFUL_GHOSTS.keys(): return if nick in var.VENGEFUL_GHOSTS.keys() and var.VENGEFUL_GHOSTS[nick][0] == "!": # ghost was driven away by retribution return if role == "hunter" and nick in var.HUNTERS and nick not in var.OTHER_KILLS: # they are a hunter and did not kill this night (if they killed this night, this allows them to switch) pm(cli, nick, "You have already killed someone this game.") return if nick in var.SILENCED: pm(cli, nick, "You have been silenced, and are unable to use any special powers.") return if role in wolfroles and var.DISEASED_WOLVES: pm(cli, nick, "You are feeling ill, and are unable to kill anyone tonight.") return if role == "alpha wolf" and nick in var.BITE_PREFERENCES: pm(cli, nick, 'You have chosen to bite someone tonight and cannot participate in the kill. Use "retract" if you want to not bite anyone tonight.') return pieces = re.split(" +",rest) victim = pieces[0] victim2 = None if role in wolfroles and var.ANGRY_WOLVES: if len(pieces) > 1: if len(pieces) > 2 and pieces[1].lower() == "and": victim2 = pieces[2] else: victim2 = pieces[1] else: victim2 = None if role == "werecrow": # Check if flying to observe if var.OBSERVED.get(nick): pm(cli, nick, ("You have already transformed into a crow; therefore, "+ "you are physically unable to kill a villager.")) return victim = get_victim(cli, nick, victim, False) if not victim: return if victim2 != None: victim2 = get_victim(cli, nick, victim2, False) if not victim2: return if victim == nick or victim2 == nick: if nick in var.VENGEFUL_GHOSTS.keys(): pm(cli, nick, "You are already dead.") else: pm(cli, nick, "Suicide is bad. Don't do it.") return if nick in var.VENGEFUL_GHOSTS.keys(): allwolves = var.list_players(var.WOLFTEAM_ROLES) allvills = [] for p in var.list_players(): if p not in allwolves: allvills.append(p) if var.VENGEFUL_GHOSTS[nick] == "wolves" and victim not in allwolves: pm(cli, nick, "You must target a wolf.") return elif var.VENGEFUL_GHOSTS[nick] == "villagers" and victim not in allvills: pm(cli, nick, "You must target a villager.") return if role in wolfroles: wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) if victim in wolfchatwolves or victim2 in wolfchatwolves: pm(cli, nick, "You may only kill villagers, not other wolves.") return if var.ANGRY_WOLVES and victim2 != None: if victim == victim2: pm(cli, nick, "You should select two different players.") return else: rv = choose_target(nick, victim) rv2 = choose_target(nick, victim2) if check_exchange(cli, nick, rv): return if check_exchange(cli, nick, rv2): return var.KILLS[nick] = [rv, rv2] else: rv = choose_target(nick, victim) if check_exchange(cli, nick, rv): return var.KILLS[nick] = [rv] else: rv = choose_target(nick, victim) if nick not in var.VENGEFUL_GHOSTS.keys(): if check_exchange(cli, nick, rv): return var.OTHER_KILLS[nick] = rv if role == "hunter": if nick not in var.HUNTERS: var.HUNTERS.append(nick) if nick in var.PASSED: var.PASSED.remove(nick) if victim2 != None: msg = " selected \u0002{0}\u0002 and \u0002{1}\u0002 to be killed.".format(victim, victim2) pm(cli, nick, "You have{0}".format(msg)) else: msg = " selected \u0002{0}\u0002 to be killed.".format(victim) pm(cli, nick, "You have{0}".format(msg)) if var.ANGRY_WOLVES and role in wolfroles: pm(cli, nick, 'You are angry tonight and may kill a second target. 
Use "kill <nick1> and <nick2>" to select multiple targets.') wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) if nick in wolfchatwolves: for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has{1}".format(nick, msg)) if victim2: debuglog("{0} ({1}) KILL: {2} and {3} ({4})".format(nick, role, victim, victim2, var.get_role(victim2))) else: debuglog("{0} ({1}) KILL: {2} ({3})".format(nick, role, victim, var.get_role(victim))) chk_nightdone(cli) @cmd("guard", "protect", "save", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("bodyguard", "guardian angel")) def guard(cli, nick, chan, rest): """Guard a player, preventing them from being targetted that night.""" if var.GUARDED.get(nick): pm(cli, nick, "You are already protecting someone tonight.") return role = var.get_role(nick) victim = get_victim(cli, nick, re.split(" +",rest)[0], False, role == "bodyguard" or var.GUARDIAN_ANGEL_CAN_GUARD_SELF) if not victim: return if role == "guardian angel" and var.LASTGUARDED.get(nick) == victim: pm(cli, nick, ("You protected \u0002{0}\u0002 last night. " + "You cannot protect the same person two nights in a row.").format(victim)) return if victim == nick: if role == "bodyguard" or not var.GUARDIAN_ANGEL_CAN_GUARD_SELF: pm(cli, nick, "You cannot guard yourself. Use pass if you do not wish to guard anyone tonight.") return elif role == "guardian angel": # choosing to guard self bypasses lucky/misdirection var.GUARDED[nick] = nick var.LASTGUARDED[nick] = nick pm(cli, nick, "You have decided to guard yourself tonight.") else: victim = choose_target(nick, victim) if check_exchange(cli, nick, victim): return var.GUARDED[nick] = victim var.LASTGUARDED[nick] = victim pm(cli, nick, "You are protecting \u0002{0}\u0002 tonight. Farewell!".format(var.GUARDED[nick])) pm(cli, var.GUARDED[nick], "You can sleep well tonight, for you are being protected.") debuglog("{0} ({1}) GUARD: {2} ({3})".format(nick, role, victim, var.get_role(victim))) chk_nightdone(cli) @cmd("observe", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("werecrow", "sorcerer")) def observe(cli, nick, chan, rest): """Observe a player to obtain various information.""" role = var.get_role(nick) victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return if victim == nick: if role == "werecrow": pm(cli, nick, "Instead of doing that, you should probably go kill someone.") else: pm(cli, nick, "That would be a waste.") return if nick in var.OBSERVED.keys(): if role == "werecrow": pm(cli, nick, "You are already flying to \u0002{0}\u0002's house.".format(var.OBSERVED[nick])) else: pm(cli, nick, "You have already observed tonight.") return if var.get_role(victim) in var.WOLFCHAT_ROLES: if role == "werecrow": pm(cli, nick, "Flying to another wolf's house is a waste of time.") else: pm(cli, nick, "Observing another wolf is a waste of time.") return victim = choose_target(nick, victim) if check_exchange(cli, nick, victim): return var.OBSERVED[nick] = victim if nick in var.KILLS.keys(): del var.KILLS[nick] if role == "werecrow": pm(cli, nick, ("You transform into a large crow and start your flight "+ "to \u0002{0}'s\u0002 house. 
You will return after "+ "collecting your observations when day begins.").format(victim)) wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 is observing \u0002{1}\u0002.".format(nick, victim)) elif role == "sorcerer": vrole = var.get_role(victim) if vrole == "amnesiac": vrole = var.AMNESIAC_ROLES[victim] if vrole in ("seer", "oracle", "augur", "sorcerer"): an = "n" if vrole.startswith(("a", "e", "i", "o", "u")) else "" pm(cli, nick, ("After casting your ritual, you determine that \u0002{0}\u0002 " + "is a{1} \u0002{2}\u0002!").format(victim, an, vrole)) else: pm(cli, nick, ("After casting your ritual, you determine that \u0002{0}\u0002 " + "does not have paranormal senses.").format(victim)) debuglog("{0} ({1}) OBSERVE: {2} ({3})".format(nick, role, victim, var.get_role(victim))) chk_nightdone(cli) @cmd("id", chan=False, pm=True, playing=True, silenced=True, phases=("day",), roles=("detective",)) def investigate(cli, nick, chan, rest): """Investigate a player to determine their exact role.""" if nick in var.INVESTIGATED: pm(cli, nick, "You may only investigate one person per round.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return if victim == nick: pm(cli, nick, "Investigating yourself would be a waste.") return victim = choose_target(nick, victim) var.INVESTIGATED.append(nick) vrole = var.get_role(victim) if vrole == "amnesiac": vrole = var.AMNESIAC_ROLES[victim] pm(cli, nick, ("The results of your investigation have returned. \u0002{0}\u0002"+ " is a... \u0002{1}\u0002!").format(victim, vrole)) debuglog("{0} ({1}) ID: {2} ({3})".format(nick, var.get_role(nick), victim, vrole)) if random.random() < var.DETECTIVE_REVEALED_CHANCE: # a 2/5 chance (should be changeable in settings) # The detective's identity is compromised! for badguy in var.list_players(var.WOLFCHAT_ROLES): pm(cli, badguy, ("Someone accidentally drops a paper. The paper reveals "+ "that \u0002{0}\u0002 is the detective!").format(nick)) debuglog("{0} ({1}) PAPER DROP".format(nick, var.get_role(nick))) @cmd("visit", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("harlot",)) def hvisit(cli, nick, chan, rest): """Visit a player. You will die if you visit a wolf or a target of the wolves.""" if var.HVISITED.get(nick): pm(cli, nick, ("You are already spending the night "+ "with \u0002{0}\u0002.").format(var.HVISITED[nick])) return victim = get_victim(cli, nick, re.split(" +",rest)[0], False, True) if not victim: return if nick == victim: # Staying home (same as calling pass, so call pass) pass_cmd.func(cli, nick, chan, "") return else: victim = choose_target(nick, victim) if check_exchange(cli, nick, victim): return var.HVISITED[nick] = victim pm(cli, nick, ("You are spending the night with \u0002{0}\u0002. "+ "Have a good time!").format(victim)) if nick != victim: #prevent luck/misdirection totem weirdness pm(cli, victim, ("You are spending the night with \u0002{0}"+ "\u0002. 
Have a good time!").format(nick)) debuglog("{0} ({1}) VISIT: {2} ({3})".format(nick, var.get_role(nick), victim, var.get_role(victim))) chk_nightdone(cli) def is_fake_nick(who): return re.search(r"^[0-9]+$", who) @cmd("see", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("seer", "oracle", "augur")) def see(cli, nick, chan, rest): """Use your paranormal powers to determine the role or alignment of a player.""" role = var.get_role(nick) if nick in var.SEEN: pm(cli, nick, "You may only have one vision per round.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return if victim == nick: pm(cli, nick, "Seeing yourself would be a waste.") return victim = choose_target(nick, victim) if check_exchange(cli, nick, victim): return victimrole = var.get_role(victim) vrole = victimrole # keep a copy for logging if role == "seer": if (victimrole in var.SEEN_WOLF and victimrole not in var.SEEN_DEFAULT) or victim in var.ROLES["cursed villager"]: victimrole = "wolf" elif victimrole in var.SEEN_DEFAULT: victimrole = var.DEFAULT_ROLE if var.DEFAULT_SEEN_AS_VILL: victimrole = "villager" pm(cli, nick, ("You have a vision; in this vision, "+ "you see that \u0002{0}\u0002 is a "+ "\u0002{1}\u0002!").format(victim, victimrole)) debuglog("{0} ({1}) SEE: {2} ({3}) as {4}".format(nick, role, victim, vrole, victimrole)) elif role == "oracle": iswolf = False if (victimrole in var.SEEN_WOLF and victimrole not in var.SEEN_DEFAULT) or victim in var.ROLES["cursed villager"]: iswolf = True pm(cli, nick, ("Your paranormal senses are tingling! "+ "The spirits tell you that \u0002{0}\u0002 is {1}"+ "a {2}wolf{2}!").format(victim, "" if iswolf else "\u0002not\u0002 ", "\u0002" if iswolf else "")) debuglog("{0} ({1}) SEE: {2} ({3}) (Wolf: {4})".format(nick, role, victim, vrole, str(iswolf))) elif role == "augur": if victimrole == "amnesiac": victimrole = var.AMNESIAC_ROLES[victim] aura = "blue" if victimrole in var.WOLFTEAM_ROLES: aura = "red" elif victimrole in var.TRUE_NEUTRAL_ROLES: aura = "grey" pm(cli, nick, ("You have a vision; in this vision, " + "you see that \u0002{0}\u0002 exudes " + "a \u0002{1}\u0002 aura!").format(victim, aura)) debuglog("{0} ({1}) SEE: {2} ({3}) as {4} ({5} aura)".format(nick, role, victim, vrole, victimrole, aura)) var.SEEN.append(nick) chk_nightdone(cli) @cmd("give", chan=False, pm=True, playing=True, silenced=True, phases=("day", "night"), roles=var.TOTEM_ORDER+("doctor",)) def give(cli, nick, chan, rest): """Give a totem or immunization to a player.""" role = var.get_role(nick) if role in var.TOTEM_ORDER: totem.caller(cli, nick, chan, rest) elif role == "doctor": immunize.caller(cli, nick, chan, rest) @cmd("totem", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=var.TOTEM_ORDER) def totem(cli, nick, chan, rest, prefix="You"): """Give a totem to a player.""" victim = get_victim(cli, nick, re.split(" +",rest)[0], False, True) if not victim: return if nick in var.LASTGIVEN and var.LASTGIVEN[nick] == victim: pm(cli, nick, "You gave your totem to \u0002{0}\u0002 last time, you must choose someone else.".format(victim)) return totem = "" role = var.get_role(nick) if role != "crazed shaman": totem = " of " + var.TOTEMS[nick] if check_exchange(cli, nick, victim): return pm(cli, nick, ("{0} have given a totem{1} to \u0002{2}\u0002.").format(prefix, totem, victim)) var.SHAMANS[nick] = victim debuglog("{0} ({1}) TOTEM: {2} ({3})".format(nick, role, victim, totem)) chk_nightdone(cli) @cmd("immunize", "immunise", 
chan=False, pm=True, playing=True, silenced=True, phases=("day",), roles=("doctor",)) def immunize(cli, nick, chan, rest): """Immunize a player, preventing them from turning into a wolf.""" if nick in var.DOCTORS and var.DOCTORS[nick] == 0: pm(cli, nick, "You have run out of immunizations.") return if not nick in var.DOCTORS: # something with amnesiac or clone or exchange totem var.DOCTORS[nick] = math.ceil(var.DOCTOR_IMMUNIZATION_MULTIPLIER * len(var.ALL_PLAYERS)) victim = get_victim(cli, nick, re.split(" +",rest)[0], False, True) if not victim: return victim = choose_target(nick, victim) vrole = var.get_role(victim) if check_exchange(cli, nick, victim): return pm(cli, nick, "You have given an immunization to \u0002{0}\u0002.".format(victim)) lycan = False if vrole == "lycan": lycan = True lycan_message = ("You feel as if a curse has been lifted from you... It seems that your lycanthropy is cured " + "and you will no longer become a werewolf if targeted by the wolves!") var.ROLES["lycan"].remove(victim) var.ROLES["villager"].append(victim) var.FINAL_ROLES[victim] = "villager" var.CURED_LYCANS.append(victim) var.IMMUNIZED.add(victim) elif victim in var.BITTEN: # fun fact: immunizations in real life are done by injecting a small amount of (usually neutered) virus into the person # so that their immune system can fight it off and build up antibodies. This doesn't work very well if that person is # currently afflicted with the virus however, as you're just adding more virus to the mix... # naturally, we would want to mimic that behavior here, and what better way of indicating that things got worse than # by making the turning happen a night earlier? :) var.BITTEN[victim] -= 1 lycan_message = ("You have a brief flashback to {0} last night. " + "The event quickly subsides, but a lingering thought remains in your mind...").format( "the events of" if vrole == "guardian angel" else "your dream") else: lycan_message = "You don't feel any different..." var.IMMUNIZED.add(victim) pm(cli, victim, ("You feel a sharp prick in the back of your arm and temporarily black out. " + "When you come to, you notice an empty syringe lying on the ground. {0}").format(lycan_message)) var.DOCTORS[nick] -= 1 debuglog("{0} ({1}) IMMUNIZE: {2} ({3})".format(nick, var.get_role(nick), victim, "lycan" if lycan else var.get_role(victim))) def get_bitten_message(nick): time_left = var.BITTEN[nick] role = var.get_role(nick) message = "" if role == "guardian angel": if time_left <= 1: message = ("After returning from last night's activities, you felt another wave of pain, this time on your back. " + "Your wings grew larger and you can now fly faster and farther than ever before. Along with " + "the size change, their color shifted from pure white to a midnight black. You didn't spend much " + "time thinking on what happened, as you were tired and went to sleep shortly thereafter.") elif time_left == 2: message = ("Despite the gloves, it seems that the villagers have been keeping their distance from you as of late. " + "None of them seem to know about your changes, so the change of behavior greatly angers you. You're " + "doing just as good a job as ever, and if anything the changes make you MORE effective and powerful. " + "These thoughts lingered for the rest of last night until you finally drifted off to an uneasy sleep.") else: message = ("As you were out last night, you felt a painful sensation as your hands grew very sharp claws. 
" + "You figure they are now sharp enough to cut through most anything, but to avoid alarming the village " + "you decide to fashion some gloves and wear them around from now on in an attempt to show nothing is " + "happening.") else: if time_left <= 1: message = ("You had the same dream again, but this time YOU were the pursuer. You smell fear from your quarry " + "as you give an exhilerating chase, going only half your speed in order to draw out the fun. " + "Suddenly your prey trips over a rock and falls down, allowing you to close in the remaining distance. " + "You savor the fear in their eyes briefly before you raise your claw to deal a killing blow. " + "Right before it connects, you wake up.") elif time_left == 2: message = ("You dreamt of running through the woods outside the village at night, wind blowing across your " + "face as you weave between the pines. Suddenly you hear a rustling sound as a monstrous creature " + "jumps out at you - a werewolf! You start running as fast as you can, you soon feel yourself falling " + "down as you trip over a rock. You look up helplessly as the werewolf catches up to you, " + "then wake up screaming.") else: message = ("You had a strange dream last night; a person was running away from something through a forest. " + "They tripped and fell over a rock as a shadow descended upon them. Before you could actually see " + "who or what the pursuer was, you woke with a start.") return message @cmd("bite", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("alpha wolf",)) def bite_cmd(cli, nick, chan, rest): """Bite a player, turning them into a wolf after a certain number of nights.""" if nick in var.ALPHA_WOLVES and nick not in var.BITE_PREFERENCES: pm(cli, nick, "You have already bitten someone this game.") return if not var.ALPHA_ENABLED: pm(cli, nick, "You may only bite someone after another wolf has died during the day.") return if var.DISEASED_WOLVES: pm(cli, nick, "You are feeling ill, and are unable to kill anyone tonight.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], False, False) if not victim: pm(cli, nick, "Please choose who to bite by specifying their nick.") return vrole = var.get_role(victim) actual = choose_target(nick, victim) if vrole in var.WOLFCHAT_ROLES: pm(cli, nick, "You may not bite other wolves.") return if nick not in var.ALPHA_WOLVES: var.ALPHA_WOLVES.append(nick) var.BITE_PREFERENCES[nick] = actual # biting someone makes them ineligible to participate in the kill if nick in var.KILLS: del var.KILLS[nick] pm(cli, nick, "You have chosen to bite \u0002{0}\u0002.".format(victim)) wolfchat = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchat: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has chosen to bite \u0002{1}\u0002.".format(nick, victim)) debuglog("{0} ({1}) BITE: {2} ({3})".format(nick, var.get_role(nick), actual, var.get_role(actual))) @cmd("pass", chan=False, pm=True, playing=True, phases=("night",), roles=("hunter","harlot","bodyguard","guardian angel","turncoat","warlock","piper")) def pass_cmd(cli, nick, chan, rest): """Decline to use your special power for that night.""" nickrole = var.get_role(nick) # turncoats can change roles and pass even if silenced if nickrole != "turncoat" and nick in var.SILENCED: if chan == nick: pm(cli, nick, "You have been silenced, and are unable to use any special powers.") else: cli.notice(nick, "You have been silenced, and are unable to use any special powers.") return if nickrole == "hunter": if nick in var.OTHER_KILLS.keys(): 
del var.OTHER_KILLS[nick] var.HUNTERS.remove(nick) pm(cli, nick, "You have decided not to kill anyone tonight.") if nick not in var.PASSED: # Prevents multiple entries var.PASSED.append(nick) elif nickrole == "harlot": if var.HVISITED.get(nick): pm(cli, nick, ("You are already spending the night "+ "with \u0002{0}\u0002.").format(var.HVISITED[nick])) return var.HVISITED[nick] = None pm(cli, nick, "You have chosen to stay home for the night.") elif nickrole == "bodyguard" or nickrole == "guardian angel": if var.GUARDED.get(nick): pm(cli, nick, "You are already protecting someone tonight.") return var.GUARDED[nick] = None pm(cli, nick, "You have chosen not to guard anyone tonight.") elif nickrole == "turncoat": if var.TURNCOATS[nick][1] == var.NIGHT_COUNT: # theoretically passing would revert them to how they were before, but # we aren't tracking that, so just tell them to change it back themselves. pm(cli, nick, ("You have already changed sides tonight. Use " + '"side villagers" or "side wolves" to modify your selection.')) return pm(cli, nick, "You have decided not to change sides tonight.") if var.TURNCOATS[nick][1] == var.NIGHT_COUNT - 1: # don't add to var.PASSED since we aren't counting them anyway for nightdone # let them still use !pass though to make them feel better or something return if nick not in var.PASSED: var.PASSED.append(nick) elif nickrole == "warlock": if nick in var.CURSED: pm(cli, nick, "You have already cursed someone tonight.") return pm(cli, nick, "You have chosen not to curse anyone tonight.") wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has chosen not to curse anyone tonight.".format(nick)) if nick not in var.PASSED: var.PASSED.append(nick) elif nickrole == "piper": if nick in var.CHARMERS: pm(cli, nick, "You have already charmed players tonight.") return pm(cli, nick, "You have chosen not to charm anyone tonight.") if nick not in var.PASSED: var.PASSED.append(nick) debuglog("{0} ({1}) PASS".format(nick, var.get_role(nick))) chk_nightdone(cli) @cmd("side", chan=False, pm=True, playing=True, phases=("night",), roles=("turncoat",)) def change_sides(cli, nick, chan, rest, sendmsg=True): if var.TURNCOATS[nick][1] == var.NIGHT_COUNT - 1: pm(cli, nick, "You have changed sides yesterday night, and may not do so again tonight.") return team = re.split(" +", rest)[0] team, _ = complete_match(team, ("villagers", "wolves")) if not team: pm(cli, nick, "Please specify which team you wish to side with, villagers or wolves.") return pm(cli, nick, "You are now siding with \u0002{0}\u0002.".format(team)) var.TURNCOATS[nick] = (team, var.NIGHT_COUNT) debuglog("{0} ({1}) SIDE {2}".format(nick, var.get_role(nick), team)) chk_nightdone(cli) @cmd("choose", "match", chan=False, pm=True, playing=True, phases=("night",), roles=("matchmaker",)) def choose(cli, nick, chan, rest, sendmsg=True): """Select two players to fall in love. You may select yourself as one of the lovers.""" if not var.FIRST_NIGHT: return if nick in var.MATCHMAKERS: pm(cli, nick, "You have already chosen lovers.") return # no var.SILENCED check for night 1 only roles; silence should only apply for the night after # but just in case, it also sucks if the one night you're allowed to act is when you are # silenced, so we ignore it here anyway. 
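    # both "choose <nick1> and <nick2>" and "choose <nick1> <nick2>" are accepted below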
pieces = re.split(" +",rest) victim = pieces[0] if len(pieces) > 1: if len(pieces) > 2 and pieces[1].lower() == "and": victim2 = pieces[2] else: victim2 = pieces[1] else: victim2 = None victim = get_victim(cli, nick, victim, False, True) if not victim: return victim2 = get_victim(cli, nick, victim2, False, True) if not victim2: return if victim == victim2: pm(cli, nick, "You must choose two different people.") return var.MATCHMAKERS.append(nick) if victim in var.LOVERS: var.LOVERS[victim].append(victim2) var.ORIGINAL_LOVERS[victim].append(victim2) else: var.LOVERS[victim] = [victim2] var.ORIGINAL_LOVERS[victim] = [victim2] if victim2 in var.LOVERS: var.LOVERS[victim2].append(victim) var.ORIGINAL_LOVERS[victim2].append(victim) else: var.LOVERS[victim2] = [victim] var.ORIGINAL_LOVERS[victim2] = [victim] if sendmsg: pm(cli, nick, "You have selected \u0002{0}\u0002 and \u0002{1}\u0002 to be lovers.".format(victim, victim2)) if victim in var.PLAYERS and not is_user_simple(victim): pm(cli, victim, ("You are \u0002in love\u0002 with {0}. If that player dies for any " + "reason, the pain will be too much for you to bear and you will " + "commit suicide.").format(victim2)) else: pm(cli, victim, "You are \u0002in love\u0002 with {0}.".format(victim2)) if victim2 in var.PLAYERS and not is_user_simple(victim2): pm(cli, victim2, ("You are \u0002in love\u0002 with {0}. If that player dies for any " + "reason, the pain will be too much for you to bear and you will " + "commit suicide.").format(victim)) else: pm(cli, victim2, "You are \u0002in love\u0002 with {0}.".format(victim)) debuglog("{0} ({1}) MATCH: {2} ({3}) + {4} ({5})".format(nick, var.get_role(nick), victim, var.get_role(victim), victim2, var.get_role(victim2))) chk_nightdone(cli) @cmd("target", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("assassin",)) def target(cli, nick, chan, rest): """Pick a player as your target, killing them if you die.""" if nick in var.TARGETED and var.TARGETED[nick] != None: pm(cli, nick, "You have already chosen a target.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return if nick == victim: pm(cli, nick, "You may not target yourself.") return victim = choose_target(nick, victim) # assassin is a template so it will never get swapped, so don't check for exchanges with it var.TARGETED[nick] = victim pm(cli, nick, "You have selected \u0002{0}\u0002 as your target.".format(victim)) debuglog("{0} ({1}-{2}) TARGET: {3} ({4})".format(nick, "-".join(var.get_templates(nick)), var.get_role(nick), victim, var.get_role(victim))) chk_nightdone(cli) @cmd("hex", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("hag",)) def hex_target(cli, nick, chan, rest): """Hex someone, preventing them from acting the next day and night.""" if nick in var.HEXED: pm(cli, nick, "You have already hexed someone tonight.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return if nick == victim: pm(cli, nick, "You may not target yourself.") return if var.LASTHEXED.get(nick) == victim: pm(cli, nick, ("You hexed \u0002{0}\u0002 last night. 
" + "You cannot hex the same person two nights in a row.").format(victim)) return victim = choose_target(nick, victim) if check_exchange(cli, nick, victim): return vrole = var.get_role(victim) if vrole in var.WOLFCHAT_ROLES: pm(cli, nick, "Hexing another wolf would be a waste.") return var.HEXED.append(nick) var.LASTHEXED[nick] = victim var.TOBESILENCED.append(victim) pm(cli, nick, "You have cast a hex on \u0002{0}\u0002.".format(victim)) wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has cast a hex on \u0002{1}\u0002.".format(nick, victim)) debuglog("{0} ({1}) HEX: {2} ({3})".format(nick, var.get_role(nick), victim, var.get_role(victim))) chk_nightdone(cli) @cmd("curse", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("warlock",)) def curse(cli, nick, chan, rest): if nick in var.CURSED: # CONSIDER: this happens even if they choose to not curse, should maybe let them # pick again in that case instead of locking them into doing nothing. pm(cli, nick, "You have already cursed someone tonight.") return victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return # There may actually be valid strategy in cursing other wolfteam members, # but for now it is not allowed. If someone seems suspicious and shows as # villager across multiple nights, safes can use that as a tell that the # person is likely wolf-aligned. vrole = var.get_role(victim) if victim in var.ROLES["cursed villager"]: pm(cli, nick, "\u0002{0}\u0002 is already cursed.".format(victim)) return if vrole in var.WOLFCHAT_ROLES: pm(cli, nick, "Cursing a fellow wolf would be a waste.") return victim = choose_target(nick, victim) if check_exchange(cli, nick, victim): return var.CURSED.append(nick) if nick in var.PASSED: var.PASSED.remove(nick) if victim not in var.ROLES["cursed villager"]: var.ROLES["cursed villager"].append(victim) pm(cli, nick, "You have cast a curse on \u0002{0}\u0002.".format(victim)) wolfchatwolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolfchatwolves: if wolf != nick: pm(cli, wolf, "\u0002{0}\u0002 has cast a curse on \u0002{1}\u0002.".format(nick, victim)) debuglog("{0} ({1}) CURSE: {2} ({3})".format(nick, var.get_role(nick), victim, var.get_role(victim))) chk_nightdone(cli) @cmd("clone", chan=False, pm=True, playing=True, phases=("night",), roles=("clone",)) def clone(cli, nick, chan, rest): """Clone another player. You will turn into their role if they die.""" if not var.FIRST_NIGHT: return if nick in var.CLONED.keys(): pm(cli, nick, "You have already chosen to clone someone.") return # no var.SILENCED check for night 1 only roles; silence should only apply for the night after # but just in case, it also sucks if the one night you're allowed to act is when you are # silenced, so we ignore it here anyway. 
victim = get_victim(cli, nick, re.split(" +",rest)[0], False) if not victim: return if nick == victim: pm(cli, nick, "You may not target yourself.") return var.CLONED[nick] = victim pm(cli, nick, "You have chosen to clone \u0002{0}\u0002.".format(victim)) debuglog("{0} ({1}) CLONE: {2} ({3})".format(nick, var.get_role(nick), victim, var.get_role(victim))) chk_nightdone(cli) @cmd("charm", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("piper",)) def charm(cli, nick, chan, rest): """Charm a player, slowly leading to your win!""" if nick in var.CHARMERS: pm(cli, nick, "You have already charmed players tonight.") return pieces = re.split(" +",rest) victim = pieces[0] if len(pieces) > 1: if len(pieces) > 2 and pieces[1].lower() == "and": victim2 = pieces[2] else: victim2 = pieces[1] else: victim2 = None victim = get_victim(cli, nick, victim, False, True) if not victim: return if victim2 is not None: victim2 = get_victim(cli, nick, victim2, False, True) if not victim2: return if victim == victim2: pm(cli, nick, "You must choose two different people.") return if nick in (victim, victim2): pm(cli, nick, "You may not charm yourself.") return charmedlist = var.CHARMED|var.TOBECHARMED if victim in charmedlist or victim2 and victim2 in charmedlist: if victim in charmedlist and victim2 and victim2 in charmedlist: pm(cli, nick, "\u0002{0}\u0002 and \u0002{1}\u0002 are already charmed!".format(victim, victim2)) return if (len(var.list_players()) - len(var.ROLES["piper"]) - len(charmedlist) - 2 >= 0 or victim in charmedlist and not victim2): pm(cli, nick, "\u0002{0}\u0002 is already charmed!".format(victim in charmedlist and victim or victim2)) return var.CHARMERS.add(nick) if nick in var.PASSED: var.PASSED.remove(nick) var.TOBECHARMED.add(victim) if victim2: var.TOBECHARMED.add(victim2) pm(cli, nick, "You have charmed \u0002{0}\u0002{1}.".format(victim, victim2 and " and \u0002{0}\u0002".format(victim2) or "")) # if there are other pipers, tell them who gets charmed (so they don't have to keep guessing who they are still allowed to charm) for piper in var.ROLES["piper"]: if piper != nick: if victim2: pm(cli, piper, "Another piper has charmed \u0002{0}\u0002 and \u0002{1}\u0002!".format(victim, victim2)) else: pm(cli, piper, "Another piper has charmed \u0002{0}\u0002!".format(victim)) if victim2: debuglog("{0} ({1}) CHARM {2} ({3}) && {4} ({5})".format(nick, var.get_role(nick), victim, var.get_role(victim), victim2, var.get_role(victim2))) else: debuglog("{0} ({1}) CHARM {2} ({3})".format(nick, var.get_role(nick), victim, var.get_role(victim))) chk_nightdone(cli) @hook("featurelist") # For multiple targets with PRIVMSG def getfeatures(cli, nick, *rest): for r in rest: if r.startswith("TARGMAX="): x = r[r.index("PRIVMSG:"):] if "," in x: l = x[x.index(":")+1:x.index(",")] else: l = x[x.index(":")+1:] l = l.strip() if not l or not l.isdigit(): continue else: var.MAX_PRIVMSG_TARGETS = int(l) continue if r.startswith("PREFIX="): prefs = r[7:] chp = [] nlp = [] finder = True for char in prefs: if char == "(": continue if char == ")": finder = False continue if finder: chp.append(char) else: nlp.append(char) allp = zip(chp, nlp) var.MODES_PREFIXES = {} for combo in allp: var.MODES_PREFIXES[combo[1]] = combo[0] # For some reason this needs to be backwards if var.AUTO_TOGGLE_MODES: tocheck = set(var.AUTO_TOGGLE_MODES) var.AUTO_TOGGLE_MODES = set(var.AUTO_TOGGLE_MODES) for mode in tocheck: if not mode in var.MODES_PREFIXES.keys() and not mode in var.MODES_PREFIXES.values(): 
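                        # the server's PREFIX list does not know this mode at all,
                        # so it can never be auto-toggled; drop it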
                        var.AUTO_TOGGLE_MODES.remove(mode)
                        continue
                    if not mode in var.MODES_PREFIXES.values():
                        for chp in var.MODES_PREFIXES.keys():
                            if chp == mode:
                                var.AUTO_TOGGLE_MODES.remove(chp)
                                var.AUTO_TOGGLE_MODES.add(var.MODES_PREFIXES[mode])
                if "v" in var.AUTO_TOGGLE_MODES:
                    var.AUTO_TOGGLE_MODES.remove("v")
        if r.startswith("CHANMODES="):
            chans = r[10:].split(",")
            var.LISTMODES, var.MODES_ALLSET, var.MODES_ONLYSET, var.MODES_NOSET = chans
        if r.startswith("MODES="):
            try:
                var.MODELIMIT = int(r[6:])
            except ValueError:
                pass
        if r.startswith("STATUSMSG="):
            var.STATUSMSG_PREFIXES = list(r.split("=")[1])

def mass_privmsg(cli, targets, msg, notice=False, privmsg=False):
    # Send msg to every target, batching recipients so that no single PRIVMSG
    # or NOTICE exceeds the TARGMAX limit the server advertised in featurelist.
    if not notice and not privmsg:
        msg_targs = []
        not_targs = []
        for target in targets:
            if is_user_notice(target):
                not_targs.append(target)
            else:
                msg_targs.append(target)
        while msg_targs:
            if len(msg_targs) <= var.MAX_PRIVMSG_TARGETS:
                bgs = ",".join(msg_targs)
                msg_targs = None
            else:
                bgs = ",".join(msg_targs[:var.MAX_PRIVMSG_TARGETS])
                msg_targs = msg_targs[var.MAX_PRIVMSG_TARGETS:]
            cli.msg(bgs, msg)
        while not_targs:
            if len(not_targs) <= var.MAX_PRIVMSG_TARGETS:
                bgs = ",".join(not_targs)
                not_targs = None
            else:
                bgs = ",".join(not_targs[:var.MAX_PRIVMSG_TARGETS])
                not_targs = not_targs[var.MAX_PRIVMSG_TARGETS:]
            cli.notice(bgs, msg)
    else:
        while targets:
            if len(targets) <= var.MAX_PRIVMSG_TARGETS:
                bgs = ",".join(targets)
                targets = None
            else:
                bgs = ",".join(targets[:var.MAX_PRIVMSG_TARGETS])
                targets = targets[var.MAX_PRIVMSG_TARGETS:]
            if notice:
                cli.notice(bgs, msg)
            else:
                cli.msg(bgs, msg)

@cmd("", chan=False, pm=True)
def relay(cli, nick, chan, rest):
    """Let the wolves talk to each other through the bot"""
    if rest.startswith("\u0001PING"):
        cli.notice(nick, rest)
        return
    if var.PHASE not in ("night", "day"):
        return

    if nick in var.list_players() and nick in getattr(var, "IDLE_WARNED_PM", ()):
        cli.msg(nick, ("\u0002You have been idling in {0} for a while. 
Please say something in {0} " "or you will be declared dead.\u0002").format(botconfig.CHANNEL)) var.IDLE_WARNED_PM.add(nick) badguys = var.list_players(var.WOLFCHAT_ROLES) if len(badguys) > 1: if nick in badguys: badguys.remove(nick) # remove self from list if rest.startswith("\u0001ACTION"): rest = rest[7:-1] mass_privmsg(cli, [guy for guy in badguys if guy in var.PLAYERS], "* \u0002{0}\u0002{1}".format(nick, rest)) else: mass_privmsg(cli, [guy for guy in badguys if guy in var.PLAYERS], "\u0002{0}\u0002 says: {1}".format(nick, rest)) def transition_night(cli): if var.PHASE == "night": return var.PHASE = "night" var.GAMEPHASE = "night" for x, tmr in var.TIMERS.items(): # cancel daytime timer tmr[0].cancel() var.TIMERS = {} # Reset nighttime variables var.KILLS = {} var.OTHER_KILLS = {} var.GUARDED = {} # key = by whom, value = the person that is visited var.KILLER = "" # nickname of who chose the victim var.SEEN = [] # list of seers that have had visions var.HEXED = [] # list of hags that have hexed var.CURSED = [] # list of warlocks that have cursed var.SHAMANS = {} var.PASSED = [] # list of certain roles that have chosen not to act var.OBSERVED = {} # those whom werecrows have observed var.CHARMERS = set() # pipers who have charmed var.HVISITED = {} var.ASLEEP = [] var.PROTECTED = [] var.DESPERATE = [] var.REVEALED = [] var.TOBESILENCED = [] var.IMPATIENT = [] var.DEATH_TOTEM = [] var.PACIFISTS = [] var.INFLUENTIAL = [] var.TOBELYCANTHROPES = [] var.TOBELUCKY = [] var.TOBEDISEASED = [] var.RETRIBUTION = [] var.TOBEMISDIRECTED = [] var.NIGHT_START_TIME = datetime.now() var.NIGHT_COUNT += 1 var.FIRST_NIGHT = (var.NIGHT_COUNT == 1) var.TOTEMS = {} var.ACTED_EXTRA = 0 daydur_msg = "" if var.NIGHT_TIMEDELTA or var.START_WITH_DAY: # transition from day td = var.NIGHT_START_TIME - var.DAY_START_TIME var.DAY_START_TIME = None var.DAY_TIMEDELTA += td min, sec = td.seconds // 60, td.seconds % 60 daydur_msg = "Day lasted \u0002{0:0>2}:{1:0>2}\u0002. ".format(min,sec) chan = botconfig.CHANNEL if var.NIGHT_TIME_LIMIT > 0: var.NIGHT_ID = time.time() t = threading.Timer(var.NIGHT_TIME_LIMIT, transition_day, [cli, var.NIGHT_ID]) var.TIMERS["night"] = (t, var.NIGHT_ID, var.NIGHT_TIME_LIMIT) t.daemon = True t.start() if var.NIGHT_TIME_WARN > 0: t2 = threading.Timer(var.NIGHT_TIME_WARN, night_warn, [cli, var.NIGHT_ID]) var.TIMERS["night_warn"] = (t2, var.NIGHT_ID, var.NIGHT_TIME_WARN) t2.daemon = True t2.start() # convert bitten people to wolves, and advance bite stage bittencopy = copy.copy(var.BITTEN) for chump in bittencopy: var.BITTEN[chump] -= 1 # short-circuit if they are already a wolf # this makes playing the day transition message easier since we can keep # var.BITTEN around for a day after they turn chumprole = var.get_role(chump) if chumprole in var.WOLF_ROLES: del var.BITTEN[chump] continue if var.BITTEN[chump] <= 0: # now a wolf newrole = "wolf" if chumprole == "guardian angel": pm(cli, chump, ("As the moonlight filters through your window, you think back on the past few days. " + "Your power has been growing, but the villagers you protect subconsciously detected " + "your shift and have been keeping more distant from you. Grinning with wicked resolve, " + "you vow to show them what fools they have been as you take to the skies once more " + "with an unholy vengeance. 
Soon they will know true fear.")) # fallen angels also automatically gain the assassin template if they don't already have it # by default GA can never be assassin, but this guards against non-default cases newrole = "fallen angel" if chump not in var.ROLES["assassin"]: var.ROLES["assassin"].append(chump) debuglog("{0} ({1}) TURNED FALLEN ANGEL".format(chump, chumprole)) else: pm(cli, chump, ("As you prepare for bed, you watch in horror as your body starts growing a coat of fur! " + "Sudden realization hits you as you grin with your now muzzled face; that mysterious bite " + "earlier slowly changed you into a werewolf! You feel bigger, stronger, faster, and ready to " + "seize the night as you stealthily exit your home and search for the rest of your pack...")) debuglog("{0} ({1}) TURNED WOLF".format(chump, chumprole)) var.BITTEN_ROLES[chump] = chumprole var.ROLES[chumprole].remove(chump) var.ROLES[newrole].append(chump) var.FINAL_ROLES[chump] = newrole for wolf in var.list_players(var.WOLFCHAT_ROLES): if wolf != chump: # no need for a/an since newrole is either wolf or fallen angel pm(cli, wolf, "\u0002{0}\u0002 is now a \u0002{1}\u0002!".format(chump, newrole)) # convert amnesiac if var.NIGHT_COUNT == var.AMNESIAC_NIGHTS: amns = copy.copy(var.ROLES["amnesiac"]) for amn in amns: event = Event("amnesiac_turn", {}) if event.dispatch(var, amn, var.AMNESIAC_ROLES[amn]): amnrole = var.AMNESIAC_ROLES[amn] var.ROLES["amnesiac"].remove(amn) var.ROLES[amnrole].append(amn) var.AMNESIACS.append(amn) var.FINAL_ROLES[amn] = amnrole if var.FIRST_NIGHT: # we don't need to tell them twice if they remember right away continue showrole = amnrole if showrole == "time lord": showrole = "villager" elif showrole == "vengeful ghost": showrole = var.DEFAULT_ROLE n = "" if showrole.startswith(("a", "e", "i", "o", "u")): n = "n" pm(cli, amn, "Your amnesia clears and you now remember that you are a{0} \u0002{1}\u0002!".format(n, showrole)) if amnrole in var.WOLFCHAT_ROLES: for wolf in var.list_players(var.WOLFCHAT_ROLES): if wolf != amn: # don't send "Foo is now a wolf!" to 'Foo' pm(cli, wolf, "\u0002{0}\u0002 is now a \u0002{1}\u0002!".format(amn, showrole)) elif amnrole == "turncoat": var.TURNCOATS[amn] = ("none", -1) debuglog("{0} REMEMBER: {1} as {2}".format(amn, amnrole, showrole)) if var.FIRST_NIGHT and chk_win(cli, end_game=False): # prevent game from ending as soon as it begins (useful for the random game mode) start(cli, botconfig.NICK, botconfig.CHANNEL, restart=var.CURRENT_GAMEMODE.name) return # game ended from bitten / amnesiac turning, narcolepsy totem expiring, or other weirdness if chk_win(cli): return # send PMs ps = var.list_players() wolves = var.list_players(var.WOLFCHAT_ROLES) for wolf in wolves: normal_notify = wolf in var.PLAYERS and not is_user_simple(wolf) role = var.get_role(wolf) cursed = "cursed " if wolf in var.ROLES["cursed villager"] else "" if normal_notify: if role == "wolf": pm(cli, wolf, ('You are a \u0002wolf\u0002. It is your job to kill all the '+ 'villagers. Use "kill <nick>" to kill a villager.')) elif role == "traitor": if cursed: pm(cli, wolf, ('You are a \u0002cursed traitor\u0002. Normally, you would be ' 'seen as a villager by the seer and oracle, but since you\'re ' 'cursed, you are seen as a wolf.')) else: pm(cli, wolf, ('You are a \u0002traitor\u0002. You are exactly like a villager ' 'and not even a seer or oracle can see your true identity, ' 'only detectives and augurs can.')) elif role == "werecrow": pm(cli, wolf, ('You are a \u0002werecrow\u0002. 
You are able to fly at night. '+ 'Use "kill <nick>" to kill a villager. Alternatively, you can '+ 'use "observe <nick>" to check if someone is in bed or not. '+ 'Observing will prevent you from participating in a killing.')) elif role == "hag": pm(cli, wolf, ('You are a \u0002{0}hag\u0002. You can hex someone to prevent them ' + 'from using any special powers they may have during the next day ' + 'and night. Use "hex <nick>" to hex them. Only detectives can reveal ' + 'your true identity, seers will see you as a regular villager.').format(cursed)) elif role == "sorcerer": pm(cli, wolf, ('You are a \u0002{0}sorcerer\u0002. You can use "observe <nick>" to ' + 'observe someone and determine if they are the seer, oracle, or augur. ' + 'Only detectives can reveal your true identity, seers will see you ' + 'as a regular villager.').format(cursed)) elif role == "wolf cub": pm(cli, wolf, ('You are a \u0002wolf cub\u0002. While you cannot kill anyone, ' + 'the other wolves will become enraged if you die and will get ' + 'two kills the following night.')) elif role == "alpha wolf": pm(cli, wolf, ('You are an \u0002alpha wolf\u0002. Once per game following the death of another wolf ' + 'during the day, you can choose to bite the wolves\' next target to turn ' + 'them into a wolf instead of killing them. Kill villagers by using ' '"kill <nick>" and "bite" to use your once-per-game bite power.')) elif role == "werekitten": pm(cli, wolf, ('You are a \u0002werekitten\u0002. Due to your overwhelming cuteness, the seer ' + 'always sees you as villager and the gunner will always miss you. Detectives can ' + 'still reveal your true identity, however. Use "kill <nick>" to kill a villager.')) elif role == "warlock": pm(cli, wolf, ('You are a \u0002{0}warlock\u0002. Each night you can curse someone with "curse <nick>" ' + 'to turn them into a cursed villager, so the seer sees them as wolf. Act quickly, as ' + 'your curse applies as soon as you cast it! Only detectives can reveal your true identity, ' + 'seers will see you as a regular villager.').format(cursed)) elif role == "wolf mystic": pm(cli, wolf, ('You are a \u0002wolf mystic\u0002. Each night you divine the number of alive good villagers ' + 'who have a special role. You may also use "kill <nick>" to kill a villager.')) elif role == "fallen angel": pm(cli, wolf, ('You are a \u0002fallen angel\u0002. Your sharp claws will rend any protection the villagers ' + 'may have, and will likely kill living guardians as well. Use "kill <nick>" to kill a villager.')) else: # catchall in case we forgot something above an = 'n' if role.startswith(("a", "e", "i", "o", "u")) else "" pm(cli, wolf, ('You are a{0} \u0002{1}\u0002. There would normally be instructions ' + 'here, but someone forgot to add them in. 
Please report this to ' + 'the admins, you can PM me "admins" for a list of available ones.').format(an, role)) if len(wolves) > 1: pm(cli, wolf, 'Also, if you PM me, your message will be relayed to other wolves.') else: an = "n" if cursed == "" and role.startswith(("a", "e", "i", "o", "u")) else "" pm(cli, wolf, 'You are a{0} \u0002{1}{2}\u0002.'.format(an, cursed, role)) # !simple pl = ps[:] random.shuffle(pl) pl.remove(wolf) # remove self from list for i, player in enumerate(pl): prole = var.get_role(player) if prole in var.WOLFCHAT_ROLES: cursed = "" if player in var.ROLES["cursed villager"]: cursed = "cursed " pl[i] = "\u0002{0}\u0002 ({1}{2})".format(player, cursed, prole) elif player in var.ROLES["cursed villager"]: pl[i] = player + " (cursed)" pm(cli, wolf, "Players: " + ", ".join(pl)) if role == "wolf mystic": # if adding this info to !myrole, you will need to save off this count so that they can't get updated info until the next night # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals numvills = len(ps) - len(var.list_players(var.WOLFTEAM_ROLES)) - len(var.list_players(("villager", "vengeful ghost", "time lord", "amnesiac", "lycan"))) - len(var.list_players(var.TRUE_NEUTRAL_ROLES)) pm(cli, wolf, "There {0} \u0002{1}\u0002 special villager{2} still alive.".format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else "")) if wolf in var.WOLF_GUNNERS.keys() and var.WOLF_GUNNERS[wolf] > 0: pm(cli, wolf, "You have a \u0002gun\u0002 with {0} bullet{1}.".format(var.WOLF_GUNNERS[wolf], "s" if var.WOLF_GUNNERS[wolf] > 1 else "")) if var.DISEASED_WOLVES: pm(cli, wolf, 'You are feeling ill tonight, and are unable to kill anyone.') elif var.ANGRY_WOLVES and role in var.WOLF_ROLES and role != "wolf cub": pm(cli, wolf, 'You are \u0002angry\u0002 tonight, and may kill two targets by using "kill <nick1> and <nick2>".') if var.ALPHA_ENABLED and role == "alpha wolf" and wolf not in var.ALPHA_WOLVES: pm(cli, wolf, ('You may use "bite <nick>" tonight in order to turn your target into a wolf instead of participating in tonight\'s kill. ' + 'They will turn into a wolf in {0} night{1}.').format(var.ALPHA_WOLF_NIGHTS, 's' if var.ALPHA_WOLF_NIGHTS > 1 else '')) for seer in var.list_players(("seer", "oracle", "augur")): pl = ps[:] random.shuffle(pl) role = var.get_role(seer) pl.remove(seer) # remove self from list a = "a" if role in ("oracle", "augur"): a = "an" if role == "seer": what = "the role of a player" elif role == "oracle": what = "whether or not a player is a wolf" elif role == "augur": what = "which team a player is on" else: what = "??? (this is a bug, please report to admins)" if seer in var.PLAYERS and not is_user_simple(seer): pm(cli, seer, ('You are {0} \u0002{1}\u0002. '+ 'It is your job to detect the wolves, you '+ 'may have a vision once per night. '+ 'Use "see <nick>" to see {2}.').format(a, role, what)) else: pm(cli, seer, "You are {0} \u0002{1}\u0002.".format(a, role)) # !simple pm(cli, seer, "Players: " + ", ".join(pl)) for harlot in var.ROLES["harlot"]: pl = ps[:] random.shuffle(pl) pl.remove(harlot) if harlot in var.PLAYERS and not is_user_simple(harlot): pm(cli, harlot, ('You are a \u0002harlot\u0002. '+ 'You may spend the night with one person per round. '+ 'If you visit a victim of a wolf, or visit a wolf, '+ 'you will die. You may stay home by visiting yourself. 
' + 'Use "visit <nick>" to visit a player.')) else: pm(cli, harlot, "You are a \u0002harlot\u0002.") # !simple pm(cli, harlot, "Players: " + ", ".join(pl)) # the messages for angel and guardian angel are different enough to merit individual loops for g_angel in var.ROLES["bodyguard"]: pl = ps[:] random.shuffle(pl) pl.remove(g_angel) chance = math.floor(var.BODYGUARD_DIES_CHANCE * 100) warning = "" if chance > 0: warning = "If you guard a wolf, there is a {0}% chance of you dying. ".format(chance) if g_angel in var.PLAYERS and not is_user_simple(g_angel): pm(cli, g_angel, ('You are a \u0002bodyguard\u0002. '+ 'It is your job to protect the villagers. {0}If you guard '+ 'a victim, you will sacrifice yourself to save them. ' + 'Use "pass" to not guard anyone tonight. ' + 'Use "guard <nick>" to guard a player.').format(warning)) else: pm(cli, g_angel, "You are a \u0002bodyguard\u0002.") # !simple pm(cli, g_angel, "Players: " + ", ".join(pl)) for gangel in var.ROLES["guardian angel"]: pl = ps[:] random.shuffle(pl) gself = "You may also guard yourself. " if not var.GUARDIAN_ANGEL_CAN_GUARD_SELF: pl.remove(gangel) gself = "" if gangel in var.LASTGUARDED: if var.LASTGUARDED[gangel] in pl: pl.remove(var.LASTGUARDED[gangel]) chance = math.floor(var.GUARDIAN_ANGEL_DIES_CHANCE * 100) warning = "" if chance > 0: warning = "If you guard a wolf, there is a {0}% chance of you dying. ".format(chance) if gangel in var.PLAYERS and not is_user_simple(gangel): pm(cli, gangel, ('You are a \u0002guardian angel\u0002. '+ 'It is your job to protect the villagers. {0}If you guard '+ 'a victim, they will live. You may not guard the same person two nights in a row. ' + '{1}Use "guard <nick>" to guard a player.').format(warning, gself)) else: pm(cli, gangel, "You are a \u0002guardian angel\u0002.") # !simple pm(cli, gangel, "Players: " + ", ".join(pl)) for dttv in var.ROLES["detective"]: pl = ps[:] random.shuffle(pl) pl.remove(dttv) chance = math.floor(var.DETECTIVE_REVEALED_CHANCE * 100) warning = "" if chance > 0: warning = ("Each time you use your ability, you risk a {0}% chance of having " + "your identity revealed to the wolves. ").format(chance) if dttv in var.PLAYERS and not is_user_simple(dttv): pm(cli, dttv, ("You are a \u0002detective\u0002.\n"+ "It is your job to determine all the wolves and traitors. "+ "Your job is during the day, and you can see the true "+ "identity of all players, even traitors.\n"+ '{0}Use "id <nick>" in PM to identify any player during the day.').format(warning)) else: pm(cli, dttv, "You are a \u0002detective\u0002.") # !simple pm(cli, dttv, "Players: " + ", ".join(pl)) for drunk in var.ROLES["village drunk"]: if drunk in var.PLAYERS and not is_user_simple(drunk): pm(cli, drunk, "You have been drinking too much! You are the \u0002village drunk\u0002.") else: pm(cli, drunk, "You are the \u0002village drunk\u0002.") for mystic in var.ROLES["mystic"]: if mystic in var.PLAYERS and not is_user_simple(mystic): pm(cli, mystic, ("You are the \u0002mystic\u0002. 
Each night you divine the number of evil " + "villagers (including wolves) that are still alive.")) else: pm(cli, mystic, "You are the \u0002mystic\u0002.") # if adding this info to !myrole, you will need to save off this count so that they can't get updated info until the next night numevil = len(var.list_players(var.WOLFTEAM_ROLES)) pm(cli, mystic, "There {0} \u0002{1}\u0002 evil villager{2} still alive.".format("are" if numevil != 1 else "is", numevil, "s" if numevil != 1 else "")) max_totems = {} for sham in var.TOTEM_ORDER: max_totems[sham] = 0 for ix in range(0, len(var.TOTEM_ORDER)): for c in var.TOTEM_CHANCES.values(): max_totems[var.TOTEM_ORDER[ix]] += c[ix] for shaman in var.list_players(var.TOTEM_ORDER): pl = ps[:] random.shuffle(pl) if shaman in var.LASTGIVEN: if var.LASTGIVEN[shaman] in pl: pl.remove(var.LASTGIVEN[shaman]) role = var.get_role(shaman) indx = var.TOTEM_ORDER.index(role) target = 0 rand = random.random() * max_totems[var.TOTEM_ORDER[indx]] for t in var.TOTEM_CHANCES.keys(): target += var.TOTEM_CHANCES[t][indx] if rand <= target: var.TOTEMS[shaman] = t break if shaman in var.PLAYERS and not is_user_simple(shaman): pm(cli, shaman, ('You are a \u0002{0}\u0002. You can select a player to receive ' + 'a {1}totem each night by using "give <nick>". You may give yourself a totem, but you ' + 'may not give the same player a totem two nights in a row. ' + 'If you do not give the totem to anyone, it will be given to a random player.').format(role, "random " if shaman in var.ROLES["crazed shaman"] else "")) if role != "crazed shaman": totem = var.TOTEMS[shaman] tmsg = 'You have the \u0002{0}\u0002 totem. '.format(totem) if totem == "death": tmsg += 'The player who is given this totem will die tonight, even if they are being protected.' elif totem == "protection": tmsg += 'The player who is given this totem is protected from dying tonight.' elif totem == "revealing": tmsg += 'If the player who is given this totem is lynched, their role is revealed to everyone instead of them dying.' elif totem == "narcolepsy": tmsg += 'The player who is given this totem will be unable to vote during the day tomorrow.' elif totem == "silence": tmsg += 'The player who is given this totem will be unable to use any special powers during the day tomorrow and the night after.' elif totem == "desperation": tmsg += 'If the player who is given this totem is lynched, the last player to vote them will also die.' elif totem == "impatience": tmsg += 'The player who is given this totem is counted as voting for everyone except themselves, even if they do not !vote.' elif totem == "pacifism": tmsg += 'Votes by the player who is given this totem do not count.' elif totem == "influence": tmsg += 'Votes by the player who is given this totem count twice.' elif totem == "exchange": tmsg += 'The first person to use a power on the player given this totem tomorrow night will have their role swapped with the recipient.' elif totem == "lycanthropy": tmsg += 'If the player who is given this totem is targeted by wolves tomorrow night, they will become a wolf.' elif totem == "luck": tmsg += 'If the player who is given this totem is targeted tomorrow night, one of the players adjacent to them will be targeted instead.' elif totem == "pestilence": tmsg += 'If the player who is given this totem is killed by wolves tomorrow night, the wolves will not be able to kill the night after.' elif totem == "retribution": tmsg += 'If the player who is given this totem will die tonight, they also kill anyone who killed them.' 
elif totem == "misdirection": tmsg += 'If the player who is given this totem attempts to use a power the following day or night, they will target a player adjacent to their intended target instead of the player they targeted.' else: tmsg += 'No description for this totem is available. This is a bug, so please report this to the admins.' pm(cli, shaman, tmsg) else: pm(cli, shaman, "You are a \u0002{0}\u0002.".format(role)) if role != "crazed shaman": pm(cli, shaman, "You have the \u0002{0}\u0002 totem.".format(var.TOTEMS[shaman])) pm(cli, shaman, "Players: " + ", ".join(pl)) for hunter in var.ROLES["hunter"]: if hunter in var.HUNTERS: continue #already killed pl = ps[:] random.shuffle(pl) pl.remove(hunter) if hunter in var.PLAYERS and not is_user_simple(hunter): pm(cli, hunter, ('You are a \u0002hunter\u0002. Once per game, you may kill another ' + 'player with "kill <nick>". If you do not wish to kill anyone tonight, ' + 'use "pass" instead.')) else: pm(cli, hunter, "You are a \u0002hunter\u0002.") pm(cli, hunter, "Players: " + ", ".join(pl)) for ms in var.ROLES["mad scientist"]: pl = ps[:] index = var.ALL_PLAYERS.index(ms) targets = [] target1 = var.ALL_PLAYERS[index - 1] target2 = var.ALL_PLAYERS[index + 1 if index < len(var.ALL_PLAYERS) - 1 else 0] if len(var.ALL_PLAYERS) >= var.MAD_SCIENTIST_SKIPS_DEAD_PLAYERS: # determine left player i = index while True: i -= 1 if i < 0: i = len(var.ALL_PLAYERS) - 1 if var.ALL_PLAYERS[i] in pl or var.ALL_PLAYERS[i] == ms: target1 = var.ALL_PLAYERS[i] break # determine right player i = index while True: i += 1 if i >= len(var.ALL_PLAYERS): i = 0 if var.ALL_PLAYERS[i] in pl or var.ALL_PLAYERS[i] == ms: target2 = var.ALL_PLAYERS[i] break targets = "\u0002{0}\u0002 and \u0002{1}\u0002".format(target1, target2) if ms in var.PLAYERS and not is_user_simple(ms): pm(cli, ms, ("You are the \u0002mad scientist\u0002. If you die, " + "you will let loose a potent chemical concoction that " + "will kill {0} if they are still alive.".format(targets))) else: pm(cli, ms, "You are the \u0002mad scientist\u0002. Targets: {0}".format(targets)) for doctor in var.ROLES["doctor"]: if doctor in var.DOCTORS and var.DOCTORS[doctor] > 0: # has immunizations remaining pl = ps[:] random.shuffle(pl) if doctor in var.PLAYERS and not is_user_simple(doctor): pm(cli, doctor, ('You are a \u0002doctor\u0002. You can give out immunizations to ' + 'villagers by using "give <nick>" in PM during the daytime. ' + 'An immunized villager will die instead of turning into a wolf due to the ' + 'alpha wolf\'s or lycan\'s power.')) else: pm(cli, doctor, "You are a \u0002doctor\u0002.") pm(cli, doctor, 'You have \u0002{0}\u0002 immunization{1}.'.format(var.DOCTORS[doctor], 's' if var.DOCTORS[doctor] > 1 else '')) for fool in var.ROLES["fool"]: if fool in var.PLAYERS and not is_user_simple(fool): pm(cli, fool, ('You are a \u0002fool\u0002. The game immediately ends with you ' + 'being the only winner if you are lynched during the day. You cannot ' + 'otherwise win this game.')) else: pm(cli, fool, "You are a \u0002fool\u0002.") for jester in var.ROLES["jester"]: if jester in var.PLAYERS and not is_user_simple(jester): pm(cli, jester, ('You are a \u0002jester\u0002. You will win alongside the normal winners ' + 'if you are lynched during the day. You cannot otherwise win this game.')) else: pm(cli, jester, "You are a \u0002jester\u0002.") for monster in var.ROLES["monster"]: if monster in var.PLAYERS and not is_user_simple(monster): pm(cli, monster, ('You are a \u0002monster\u0002. 
You cannot be killed by the wolves. ' + 'If you survive until the end of the game, you win instead of the ' + 'normal winners.')) else: pm(cli, monster, "You are a \u0002monster\u0002.") for lycan in var.ROLES["lycan"]: if lycan in var.PLAYERS and not is_user_simple(lycan): pm(cli, lycan, ('You are a \u0002lycan\u0002. You are currently on the side of the ' + 'villagers, but will turn into a wolf instead of dying if you are ' + 'targeted by the wolves during the night.')) else: pm(cli, lycan, "You are a \u0002lycan\u0002.") for v_ghost, who in var.VENGEFUL_GHOSTS.items(): if who[0] == "!": continue wolves = var.list_players(var.WOLFTEAM_ROLES) if who == "wolves": pl = wolves else: pl = ps[:] for wolf in wolves: pl.remove(wolf) random.shuffle(pl) if v_ghost in var.PLAYERS and not is_user_simple(v_ghost): pm(cli, v_ghost, ('You are a \u0002vengeful ghost\u0002, sworn to take revenge on the ' + '{0} that you believe killed you. You must kill one of them with ' + '"kill <nick>" tonight. If you do not, one of them will be selected ' + 'at random.').format(who)) else: pm(cli, v_ghost, "You are a \u0002vengeful ghost\u0002.") pm(cli, v_ghost, who.capitalize() + ": " + ", ".join(pl)) debuglog("GHOST: {0} (target: {1}) - players: {2}".format(v_ghost, who, ", ".join(pl))) for ass in var.ROLES["assassin"]: if ass in var.TARGETED and var.TARGETED[ass] != None: continue # someone already targeted pl = ps[:] random.shuffle(pl) pl.remove(ass) role = var.get_role(ass) if role == "village drunk": var.TARGETED[ass] = random.choice(pl) message = ("You are an \u0002assassin\u0002. In your drunken stupor you have selected " + "\u0002{0}\u0002 as your target.").format(var.TARGETED[ass]) if ass in var.PLAYERS and not is_user_simple(ass): message += " If you die you will take out your target with you." pm(cli, ass, message) else: if ass in var.PLAYERS and not is_user_simple(ass): pm(cli, ass, ('You are an \u0002assassin\u0002. Choose a target with ' + '"target <nick>". If you die you will take out your target with you. ' + 'If your target dies you may choose another one.')) else: pm(cli, ass, "You are an \u0002assassin\u0002.") pm(cli, ass, "Players: " + ", ".join(pl)) for piper in var.ROLES["piper"]: pl = ps[:] random.shuffle(pl) pl.remove(piper) for charmed in var.CHARMED: if charmed in pl: # corner case: if there are multiple pipers and a piper is charmed, the piper will be in var.CHARMED but not in pl pl.remove(charmed) if piper in var.PLAYERS and not is_user_simple(piper): pm(cli, piper, ('You are a \u0002piper\u0002. You must select two players ' + 'to charm each night. The charmed players will know each ' + 'other, but not who charmed them. You win when all other ' + 'players are charmed. Use "charm <nick1> and <nick2>" to ' + 'select the players to charm.')) else: pm(cli, piper, "You are a \u0002piper\u0002.") pm(cli, piper, "Players: " + ", ".join(pl)) for turncoat in var.ROLES["turncoat"]: # they start out as unsided, but can change n1 if turncoat not in var.TURNCOATS: var.TURNCOATS[turncoat] = ("none", -1) if turncoat in var.PLAYERS and not is_user_simple(turncoat): message = ('You are a \u0002turncoat\u0002. You can change which team you\'re siding with every other night. ' + 'Use "side villagers" or "side wolves" to select your team. ') if var.TURNCOATS[turncoat][0] != "none": message += 'You are currently siding with \u0002{0}\u0002.'.format(var.TURNCOATS[turncoat][0]) else: message += 'If you die before selecting a side, you will not win.' 
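            # var.TURNCOATS maps nick -> (side, night_last_changed); a night
            # of -1 means the turncoat has not picked a side yet.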
pm(cli, turncoat, message) else: pm(cli, turncoat, 'You are a \u0002turncoat\u0002. Current side: \u0002{0}\u0002.'.format(var.TURNCOATS[turncoat][0])) if var.FIRST_NIGHT: for mm in var.ROLES["matchmaker"]: pl = ps[:] random.shuffle(pl) if mm in var.PLAYERS and not is_user_simple(mm): pm(cli, mm, ('You are a \u0002matchmaker\u0002. You can select two players ' + 'to be lovers with "choose <nick1> and <nick2>". If one lover ' + 'dies, the other will as well. You may select yourself as one ' + 'of the lovers. You may only select lovers during the first night. ' + 'If you do not select lovers, they will be randomly selected and ' + 'you will not be told who they are (unless you are one of them).')) else: pm(cli, mm, "You are a \u0002matchmaker\u0002.") pm(cli, mm, "Players: " + ", ".join(pl)) for clone in var.ROLES["clone"]: pl = ps[:] random.shuffle(pl) pl.remove(clone) if clone in var.PLAYERS and not is_user_simple(clone): pm(cli, clone, ('You are a \u0002clone\u0002. You can select someone to clone ' + 'with "clone <nick>". If that player dies, you become their ' + 'role(s). You may only clone someone during the first night.')) else: pm(cli, clone, "You are a \u0002clone\u0002.") pm(cli, clone, "Players: "+", ".join(pl)) for minion in var.ROLES["minion"]: wolves = var.list_players(var.WOLF_ROLES) random.shuffle(wolves) if minion in var.PLAYERS and not is_user_simple(minion): pm(cli, minion, "You are a \u0002minion\u0002. It is your job to help the wolves kill all of the villagers.") else: pm(cli, minion, "You are a \u0002minion\u0002.") pm(cli, minion, "Wolves: " + ", ".join(wolves)) villagers = copy.copy(var.ROLES["villager"]) villagers += var.ROLES["time lord"] if var.DEFAULT_ROLE == "villager": villagers += var.ROLES["vengeful ghost"] + var.ROLES["amnesiac"] random.shuffle(villagers) for villager in villagers: if villager in var.PLAYERS and not is_user_simple(villager): pm(cli, villager, "You are a \u0002villager\u0002. It is your job to lynch all of the wolves.") else: pm(cli, villager, "You are a \u0002villager\u0002.") cultists = copy.copy(var.ROLES["cultist"]) if var.DEFAULT_ROLE == "cultist": cultists += var.ROLES["vengeful ghost"] + var.ROLES["amnesiac"] random.shuffle(cultists) for cultist in cultists: if cultist in var.PLAYERS and not is_user_simple(cultist): pm(cli, cultist, "You are a \u0002cultist\u0002. It is your job to help the wolves kill all of the villagers.") else: pm(cli, cultist, "You are a \u0002cultist\u0002.") for g in var.GUNNERS.keys(): if g not in ps: continue elif not var.GUNNERS[g]: continue elif var.GUNNERS[g] == 0: continue norm_notify = g in var.PLAYERS and not is_user_simple(g) role = "gunner" if g in var.ROLES["sharpshooter"]: role = "sharpshooter" if norm_notify: if role == "gunner": gun_msg = ('You are a \u0002{0}\u0002 and hold a gun that shoots special silver bullets. ' + 'You may only use it during the day by typing "{0}shoot <nick>" in channel. '.format(botconfig.CMD_CHAR) + 'Wolves and the crow will die instantly when shot, but anyone else will ' + 'likely survive. You have {1}.') elif role == "sharpshooter": gun_msg = ('You are a \u0002{0}\u0002 and hold a gun that shoots special silver bullets. ' + 'You may only use it during the day by typing "{0}shoot <nick>" in channel. '.format(botconfig.CMD_CHAR) + 'Wolves and the crow will die instantly when shot, and anyone else will ' + 'likely die as well due to your skill with the gun. 
You have {1}.') else: gun_msg = ("You are a \u0002{0}\u0002 and have a gun with {1}.") if var.GUNNERS[g] == 1: gun_msg = gun_msg.format(role, "1 bullet") elif var.GUNNERS[g] > 1: gun_msg = gun_msg.format(role, str(var.GUNNERS[g]) + " bullets") else: continue pm(cli, g, gun_msg) dmsg = (daydur_msg + "It is now nighttime. All players "+ "check for PMs from me for instructions.") if not var.FIRST_NIGHT: dmsg = (dmsg + " If you did not receive one, simply sit back, "+ "relax, and wait patiently for morning.") cli.msg(chan, dmsg) debuglog("BEGIN NIGHT") # If there are no nightroles that can act, immediately turn it to daytime chk_nightdone(cli) def cgamemode(cli, arg): chan = botconfig.CHANNEL if var.ORIGINAL_SETTINGS: # needs reset reset_settings() modeargs = arg.split("=", 1) modeargs = [a.strip() for a in modeargs] if modeargs[0] in var.GAME_MODES.keys(): md = modeargs.pop(0) try: gm = var.GAME_MODES[md][0](*modeargs) gm.startup() for attr in dir(gm): val = getattr(gm, attr) if (hasattr(var, attr) and not callable(val) and not attr.startswith("_")): var.ORIGINAL_SETTINGS[attr] = getattr(var, attr) setattr(var, attr, val) var.CURRENT_GAMEMODE = gm return True except var.InvalidModeException as e: cli.msg(botconfig.CHANNEL, "Invalid mode: "+str(e)) return False else: cli.msg(chan, "Mode \u0002{0}\u0002 not found.".format(modeargs[0])) @cmd("start", phases=("join",)) def start_cmd(cli, nick, chan, rest): """Starts a game of Werewolf.""" start(cli, nick, chan) def start(cli, nick, chan, forced = False, restart = ""): if (not forced and var.LAST_START and nick in var.LAST_START and var.LAST_START[nick] + timedelta(seconds=var.START_RATE_LIMIT) > datetime.now() and not restart): cli.notice(nick, ("This command is rate-limited. Please wait a while " "before using it again.")) return if restart: var.RESTART_TRIES += 1 if var.RESTART_TRIES > 3: stop_game(cli, abort=True) return if not restart: var.LAST_START[nick] = datetime.now() if chan != botconfig.CHANNEL: return villagers = var.list_players() pl = villagers[:] if not restart: if var.PHASE == "none": cli.notice(nick, "No game is currently running.") return if var.PHASE != "join": cli.notice(nick, "Werewolf is already in play.") return if nick not in villagers and nick != chan and not forced: cli.notice(nick, "You're not currently playing.") return now = datetime.now() var.GAME_START_TIME = now # Only used for the idler checker dur = int((var.CAN_START_TIME - now).total_seconds()) if dur > 0 and not forced: plural = "" if dur == 1 else "s" cli.msg(chan, "Please wait at least {0} more second{1}.".format(dur, plural)) return if len(villagers) < var.MIN_PLAYERS: cli.msg(chan, "{0}: \u0002{1}\u0002 or more players are required to play.".format(nick, var.MIN_PLAYERS)) return if len(villagers) > var.MAX_PLAYERS: cli.msg(chan, "{0}: At most \u0002{1}\u0002 players may play.".format(nick, var.MAX_PLAYERS)) return if not var.FGAMED: votes = {} #key = gamemode, not cloak for gamemode in var.GAMEMODE_VOTES.values(): if len(villagers) >= var.GAME_MODES[gamemode][1] and len(villagers) <= var.GAME_MODES[gamemode][2]: votes[gamemode] = votes.get(gamemode, 0) + 1 voted = [gamemode for gamemode in votes if votes[gamemode] == max(votes.values()) and votes[gamemode] >= len(villagers)/2] if len(voted): cgamemode(cli, random.choice(voted)) else: possiblegamemodes = [] for gamemode in var.GAME_MODES.keys(): if len(villagers) >= var.GAME_MODES[gamemode][1] and len(villagers) <= var.GAME_MODES[gamemode][2] and var.GAME_MODES[gamemode][3] > 0: possiblegamemodes += 
[gamemode]*(var.GAME_MODES[gamemode][3]+votes.get(gamemode, 0)*15) cgamemode(cli, random.choice(possiblegamemodes)) else: cgamemode(cli, restart) var.GAME_ID = time.time() # restart reaper timer addroles = {} event = Event("role_attribution", {"addroles": addroles}) if event.dispatch(cli, var, villagers): for index in range(len(var.ROLE_INDEX) - 1, -1, -1): if var.ROLE_INDEX[index] <= len(villagers): for role, num in var.ROLE_GUIDE.items(): # allow event to override some roles addroles[role] = addroles.get(role, num[index]) break else: cli.msg(chan, "{0}: No game settings are defined for \u0002{1}\u0002 player games.".format(nick, len(villagers))) return if var.ORIGINAL_SETTINGS and not restart: # Custom settings need_reset = True wvs = sum(addroles[r] for r in var.WOLFCHAT_ROLES) if len(villagers) < (sum(addroles.values()) - sum(addroles[r] for r in var.TEMPLATE_RESTRICTIONS.keys())): cli.msg(chan, "There are too few players in the "+ "game to use the custom roles.") elif not wvs: cli.msg(chan, "There has to be at least one wolf!") elif wvs > (len(villagers) / 2): cli.msg(chan, "Too many wolves.") elif set(addroles) != set(var.ROLE_GUIDE): cli.msg(chan, "Error: Not all roles have defined player counts.") else: need_reset = False if need_reset: reset_settings() cli.msg(chan, "The default settings have been restored. Please !start again.") var.PHASE = "join" return if var.ADMIN_TO_PING and not restart: for decor in (COMMANDS.get("join", []) + COMMANDS.get("start", [])): decor(lambda *spam: cli.msg(chan, "This command has been disabled by an admin.")) if not restart: # will already be stored if restarting var.ALL_PLAYERS = copy.copy(var.ROLES["person"]) var.ROLES = {} var.GUNNERS = {} var.WOLF_GUNNERS = {} var.SEEN = [] var.OBSERVED = {} var.KILLS = {} var.GUARDED = {} var.HVISITED = {} var.HUNTERS = [] var.VENGEFUL_GHOSTS = {} var.CLONED = {} var.TARGETED = {} var.LASTGUARDED = {} var.LASTHEXED = {} var.LASTGIVEN = {} var.LOVERS = {} var.MATCHMAKERS = [] var.REVEALED_MAYORS = [] var.SILENCED = [] var.TOBESILENCED = [] var.DESPERATE = [] var.REVEALED = [] var.ASLEEP = [] var.PROTECTED = [] var.JESTERS = [] var.AMNESIACS = [] var.NIGHT_COUNT = 0 var.DAY_COUNT = 0 var.ANGRY_WOLVES = False var.DISEASED_WOLVES = False var.TRAITOR_TURNED = False var.FINAL_ROLES = {} var.ORIGINAL_LOVERS = {} var.IMPATIENT = [] var.DEATH_TOTEM = [] var.PACIFISTS = [] var.INFLUENTIAL = [] var.LYCANTHROPES = [] var.TOBELYCANTHROPES = [] var.LUCKY = [] var.TOBELUCKY = [] var.DISEASED = [] var.TOBEDISEASED = [] var.RETRIBUTION = [] var.MISDIRECTED = [] var.TOBEMISDIRECTED = [] var.EXCHANGED = [] var.SHAMANS = {} var.HEXED = [] var.OTHER_KILLS = {} var.ACTED_EXTRA = 0 var.ABSTAINED = False var.DOCTORS = {} var.IMMUNIZED = set() var.CURED_LYCANS = [] var.ALPHA_WOLVES = [] var.ALPHA_ENABLED = False var.BITTEN = {} var.BITE_PREFERENCES = {} var.BITTEN_ROLES = {} var.LYCAN_ROLES = {} var.AMNESIAC_ROLES = {} var.CHARMERS = set() var.CHARMED = set() var.TOBECHARMED = set() var.ACTIVE_PROTECTIONS = defaultdict(list) var.TURNCOATS = {} var.EXCHANGED_ROLES = [] var.EXTRA_WOLVES = 0 for role, count in addroles.items(): if role in var.TEMPLATE_RESTRICTIONS.keys(): var.ROLES[role] = [None] * count continue # We deal with those later, see below selected = random.sample(villagers, count) var.ROLES[role] = selected for x in selected: villagers.remove(x) for v in villagers: var.ROLES[var.DEFAULT_ROLE].append(v) # Now for the templates for template, restrictions in var.TEMPLATE_RESTRICTIONS.items(): if template == "sharpshooter": 
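            # sharpshooter slots are filled later by promoting eligible gunners
            # (see the gunner handling below), so the template is not sampled here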
continue # sharpshooter gets applied specially possible = pl[:] for cannotbe in var.list_players(restrictions): if cannotbe in possible: possible.remove(cannotbe) if len(possible) < len(var.ROLES[template]): cli.msg(chan, "Not enough valid targets for the {0} template.".format(template)) if var.ORIGINAL_SETTINGS: var.ROLES = {"person": var.ALL_PLAYERS} reset_settings() cli.msg(chan, "The default settings have been restored. Please !start again.") var.PHASE = "join" return else: cli.msg(chan, "This role has been skipped for this game.") var.ROLES[template] = [] continue var.ROLES[template] = random.sample(possible, len(var.ROLES[template])) # Handle gunner cannot_be_sharpshooter = var.list_players(var.TEMPLATE_RESTRICTIONS["sharpshooter"]) gunner_list = copy.copy(var.ROLES["gunner"]) num_sharpshooters = 0 for gunner in gunner_list: if gunner in var.ROLES["village drunk"]: var.GUNNERS[gunner] = (var.DRUNK_SHOTS_MULTIPLIER * math.ceil(var.SHOTS_MULTIPLIER * len(pl))) elif num_sharpshooters < addroles["sharpshooter"] and gunner not in cannot_be_sharpshooter and random.random() <= var.SHARPSHOOTER_CHANCE: var.GUNNERS[gunner] = math.ceil(var.SHARPSHOOTER_MULTIPLIER * len(pl)) var.ROLES["gunner"].remove(gunner) var.ROLES["sharpshooter"].append(gunner) num_sharpshooters += 1 else: var.GUNNERS[gunner] = math.ceil(var.SHOTS_MULTIPLIER * len(pl)) while True: try: var.ROLES["sharpshooter"].remove(None) except ValueError: break if not restart: var.SPECIAL_ROLES["goat herder"] = [] if var.GOAT_HERDER: var.SPECIAL_ROLES["goat herder"] = [ nick ] with var.WARNING_LOCK: # cancel timers for name in ("join", "join_pinger"): if name in var.TIMERS: var.TIMERS[name][0].cancel() del var.TIMERS[name] var.LAST_STATS = None var.LAST_TIME = None var.LAST_VOTES = None if not restart: gamemode = var.CURRENT_GAMEMODE.name # Alert the players to option changes they may not be aware of options = [] if var.ORIGINAL_SETTINGS.get("ROLE_REVEAL") is not None: if var.ROLE_REVEAL == "on": options.append("role reveal") elif var.ROLE_REVEAL == "team": options.append("team reveal") elif var.ROLE_REVEAL == "off": options.append("no role reveal") if var.ORIGINAL_SETTINGS.get("STATS_TYPE") is not None: if var.STATS_TYPE == "disabled": options.append("no stats") else: options.append("{0} stats".format(var.STATS_TYPE)) if var.ORIGINAL_SETTINGS.get("ABSTAIN_ENABLED") is not None or var.ORIGINAL_SETTINGS.get("LIMIT_ABSTAIN") is not None: if var.ABSTAIN_ENABLED and var.LIMIT_ABSTAIN: options.append("restricted abstaining") elif var.ABSTAIN_ENABLED: options.append("unrestricted abstaining") else: options.append("no abstaining") if len(options) > 2: options = " with {0}, and {1}".format(", ".join(options[:-1]), options[-1]) elif len(options) == 2: options = " with {0} and {1}".format(options[0], options[1]) elif len(options) == 1: options = " with {0}".format(options[0]) else: options = "" cli.msg(chan, ("{0}: Welcome to Werewolf, the popular detective/social party "+ "game (a theme of Mafia). Using the \u0002{1}\u0002 game mode{2}.").format(", ".join(pl), gamemode, options)) cli.mode(chan, "+m") var.ORIGINAL_ROLES = copy.deepcopy(var.ROLES) # Make a copy # Handle amnesiac; # matchmaker is blacklisted if AMNESIAC_NIGHTS > 1 due to only being able to act night 1 # clone and traitor are blacklisted due to assumptions made in default !stats computations. # If you remove these from the blacklist you will need to modify the default !stats logic # chains in order to correctly account for these. 
As a forewarning, such modifications are # nontrivial and will likely require a great deal of thought (and likely new tracking vars) amnroles = list(var.ROLE_GUIDE.keys() - [var.DEFAULT_ROLE, "amnesiac", "clone", "traitor"]) if var.AMNESIAC_NIGHTS > 1 and "matchmaker" in amnroles: amnroles.remove("matchmaker") for nope in var.AMNESIAC_BLACKLIST: if nope in amnroles: amnroles.remove(nope) for nope in var.TEMPLATE_RESTRICTIONS.keys(): if nope in amnroles: amnroles.remove(nope) for amnesiac in var.ROLES["amnesiac"]: var.AMNESIAC_ROLES[amnesiac] = random.choice(amnroles) # Handle doctor for doctor in var.ROLES["doctor"]: var.DOCTORS[doctor] = math.ceil(var.DOCTOR_IMMUNIZATION_MULTIPLIER * len(pl)) for amn in var.AMNESIAC_ROLES: if var.AMNESIAC_ROLES[amn] == "doctor": var.DOCTORS[amn] = math.ceil(var.DOCTOR_IMMUNIZATION_MULTIPLIER * len(pl)) var.DAY_TIMEDELTA = timedelta(0) var.NIGHT_TIMEDELTA = timedelta(0) var.DAY_START_TIME = datetime.now() var.NIGHT_START_TIME = datetime.now() var.LAST_PING = None roles = copy.copy(var.ROLES) for rol in roles: r = [] for rw in var.plural(rol).split(" "): rwu = rw[0].upper() if len(rw) > 1: rwu += rw[1:] r.append(rwu) r = " ".join(r) var.PLAYERS = {plr:dict(var.USERS[plr]) for plr in pl if plr in var.USERS} debuglog("ROLES:", " | ".join("{0}: {1}".format(role, ", ".join(players)) for role, players in sorted(var.ROLES.items()) if players and role not in var.TEMPLATE_RESTRICTIONS.keys())) templates = " | ".join("{0}: {1}".format(tmplt, ", ".join(players)) for tmplt, players in sorted(var.ROLES.items()) if players and tmplt in var.TEMPLATE_RESTRICTIONS.keys()) if not templates: templates = "None" debuglog("TEMPLATES:", templates) if restart: var.PHASE = None # allow transition_* to run properly if game was restarted on first night var.FIRST_NIGHT = True if not var.START_WITH_DAY: var.GAMEPHASE = "night" transition_night(cli) else: var.FIRST_DAY = True var.GAMEPHASE = "day" transition_day(cli) for cloak in list(var.STASISED.keys()): var.STASISED[cloak] -= 1 var.set_stasis(cloak, var.STASISED[cloak]) if var.STASISED[cloak] <= 0: del var.STASISED[cloak] if not var.DISABLE_ACCOUNTS: for acc in list(var.STASISED_ACCS.keys()): var.STASISED_ACCS[acc] -= 1 var.set_stasis_acc(acc, var.STASISED_ACCS[acc]) if var.STASISED_ACCS[acc] <= 0: del var.STASISED_ACCS[acc] if not botconfig.DEBUG_MODE or not var.DISABLE_DEBUG_MODE_REAPER: # DEATH TO IDLERS! reapertimer = threading.Thread(None, reaper, args=(cli,var.GAME_ID)) reapertimer.daemon = True reapertimer.start() @hook("error") def on_error(cli, pfx, msg): if var.RESTARTING or msg.endswith("(Excess Flood)"): _restart_program(cli) elif msg.startswith("Closing Link:"): raise SystemExit @cmd("fstasis", admin_only=True, pm=True) def fstasis(cli, nick, chan, rest): """Removes or sets stasis penalties.""" data = rest.split() msg = None if data: lusers = {k.lower(): v for k, v in var.USERS.items()} user = data[0] if user.lower() in lusers: cloak = lusers[user.lower()]["cloak"] acc = lusers[user.lower()]["account"] else: cloak = user acc = None if var.ACCOUNTS_ONLY and acc == "*": acc = None cloak = None msg = "{0} is not logged in to NickServ.".format(user) if not acc and user in var.STASISED_ACCS: acc = user err_msg = "The amount of stasis has to be a non-negative integer." 
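        # Host-based stasis path: used when accounts are disabled or the target
        # has no NickServ account. For example (assuming CMD_CHAR is "!"),
        # "!fstasis somenick 3" sets three games of stasis and
        # "!fstasis somenick 0" clears it; the account path below mirrors this.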
if (not var.ACCOUNTS_ONLY or not acc) and cloak: if len(data) == 1: if cloak in var.STASISED: plural = "" if var.STASISED[cloak] == 1 else "s" msg = "\u0002{0}\u0002 (Host: {1}) is in stasis for \u0002{2}\u0002 game{3}.".format(data[0], cloak, var.STASISED[cloak], plural) else: msg = "\u0002{0}\u0002 (Host: {1}) is not in stasis.".format(data[0], cloak) else: try: amt = int(data[1]) except ValueError: if chan == nick: pm(cli, nick, err_msg) else: cli.notice(nick, err_msg) return if amt < 0: if chan == nick: pm(cli, nick, err_msg) else: cli.notice(nick, err_msg) return elif amt > 2**31-1: amt = 2**31-1 if amt > 0: var.STASISED[cloak] = amt var.set_stasis(cloak, amt) plural = "" if amt == 1 else "s" msg = "\u0002{0}\u0002 (Host: {1}) is now in stasis for \u0002{2}\u0002 game{3}.".format(data[0], cloak, amt, plural) elif amt == 0: if cloak in var.STASISED: del var.STASISED[cloak] var.set_stasis(cloak, 0) msg = "\u0002{0}\u0002 (Host: {1}) is no longer in stasis.".format(data[0], cloak) else: msg = "\u0002{0}\u0002 (Host: {1}) is not in stasis.".format(data[0], cloak) if not var.DISABLE_ACCOUNTS and acc: if len(data) == 1: if acc in var.STASISED_ACCS: plural = "" if var.STASISED_ACCS[acc] == 1 else "s" msg = "\u0002{0}\u0002 (Account: {1}) is in stasis for \u0002{2}\u0002 game{3}.".format(data[0], acc, var.STASISED_ACCS[acc], plural) else: msg = "\u0002{0}\u0002 (Account: {1}) is not in stasis.".format(data[0], acc) else: try: amt = int(data[1]) except ValueError: if chan == nick: pm(cli, nick, err_msg) else: cli.notice(nick, err_msg) return if amt < 0: if chan == nick: pm(cli, nick, err_msg) else: cli.notice(nick, err_msg) return elif amt > 2**31-1: amt = 2**31-1 if amt > 0: var.STASISED_ACCS[acc] = amt var.set_stasis_acc(acc, amt) plural = "" if amt == 1 else "s" msg = "\u0002{0}\u0002 (Account: {1}) is now in stasis for \u0002{2}\u0002 game{3}.".format(data[0], acc, amt, plural) elif amt == 0: if acc in var.STASISED_ACCS: del var.STASISED_ACCS[acc] var.set_stasis_acc(acc, 0) msg = "\u0002{0}\u0002 (Account: {1}) is no longer in stasis.".format(data[0], acc) else: msg = "\u0002{0}\u0002 (Account: {1}) is not in stasis.".format(data[0], acc) elif var.STASISED or var.STASISED_ACCS: stasised = {} cloakstas = dict(var.STASISED) accstas = dict(var.STASISED_ACCS) for stas in var.USERS: if not var.DISABLE_ACCOUNTS and var.USERS[stas]["account"] in accstas: stasised[var.USERS[stas]["account"]+" (Account)"] = accstas.pop(var.USERS[stas]["account"]) #if var.USERS[stas]["cloak"] in cloakstas: # del cloakstas[var.USERS[stas]["cloak"]] elif var.USERS[stas]["cloak"] in cloakstas: if var.DISABLE_ACCOUNTS: stasised[var.USERS[stas]["cloak"]] = cloakstas.pop(var.USERS[stas]["cloak"]) else: stasised[var.USERS[stas]["cloak"]+" (Host)"] = cloakstas.pop(var.USERS[stas]["cloak"]) for oldcloak in cloakstas: if var.DISABLE_ACCOUNTS: stasised[oldcloak] = cloakstas[oldcloak] else: stasised[oldcloak+" (Host)"] = cloakstas[oldcloak] if not var.DISABLE_ACCOUNTS: for oldacc in accstas: stasised[oldacc+" (Account)"] = accstas[oldacc] msg = "Currently stasised: {0}".format(", ".join( "\u0002{0}\u0002 ({1})".format(usr, number) for usr, number in stasised.items())) else: msg = "Nobody is currently stasised." if msg: if data: tokens = msg.split() if ((data[0] == cloak and tokens[1] == "({0})".format(cloak)) or (data[0] == acc and tokens[1] == "({0})".format(acc))): # Don't show the cloak/account twice. 
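                # tokens[1] is the parenthesized duplicate of what the admin
                # typed; splice it out and keep the rest of the message intact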
msg = " ".join((tokens[0], " ".join(tokens[2:]))) if chan == nick: pm(cli, nick, msg) else: cli.msg(chan, msg) def is_user_stasised(nick): """Checks if a user is in stasis. Returns a number of games in stasis.""" if nick in var.USERS: cloak = var.USERS[nick]["cloak"] acc = var.USERS[nick]["account"] else: return 0 if not var.DISABLE_ACCOUNTS and acc and acc != "*": if acc in var.STASISED_ACCS: return var.STASISED_ACCS[acc] for clk in var.STASISED: if fnmatch.fnmatch(cloak, clk): return var.STASISED[clk] return 0 def allow_deny(cli, nick, chan, rest, mode): data = rest.split() msg = None modes = ("allow", "deny") assert mode in modes, "mode not in {!r}".format(modes) opts = defaultdict(bool) if data and data[0].startswith("-"): if data[0] == "-cmds": opts["cmds"] = True elif data[0] == "-cmd": if len(data) < 2: if chan == nick: pm(cli, nick, "Error: No command specified. Did you mean \u0002-cmds\u0002?") else: cli.notice(nick, "Error: No command specified. Did you mean \u0002-cmds\u0002?") return opts["cmd"] = data[1] data = data[1:] else: if chan == nick: pm(cli, nick, "Invalid option: {0}".format(data[0][1:])) else: cli.notice(nick, "Invalid option: {0}".format(data[0][1:])) return data = data[1:] if data and not opts["cmd"]: lusers = {k.lower(): v for k, v in var.USERS.items()} user = data[0] if user.lower() in lusers: cloak = lusers[user.lower()]["cloak"] acc = lusers[user.lower()]["account"] else: cloak = user acc = None if not acc or acc == "*": acc = None if not var.DISABLE_ACCOUNTS and acc: if mode == "allow": variable = var.ALLOW_ACCOUNTS else: variable = var.DENY_ACCOUNTS if len(data) == 1: if acc in variable: msg = "\u0002{0}\u0002 (Account: {1}) is {2} the following {3}commands: {4}.".format( data[0], acc, "allowed" if mode == "allow" else "denied", "special " if mode == "allow" else "", ", ".join(variable[acc])) else: msg = "\u0002{0}\u0002 (Account: {1}) is not {2} commands.".format(data[0], acc, "allowed any special" if mode == "allow" else "denied any") else: if acc not in variable: variable[acc] = [] commands = data[1:] for command in commands: # Add or remove commands one at a time to a specific account if "-*" in commands: # Remove all for cmd in variable[acc]: if mode == "allow": var.remove_allow_acc(acc, cmd) else: var.remove_deny_acc(acc, cmd) del variable[acc] break if command[0] == "-": # Starting with - (to remove) rem = True command = command[1:] else: rem = False if command.startswith(botconfig.CMD_CHAR): # ignore command prefix command = command[len(botconfig.CMD_CHAR):] if not rem: if command in COMMANDS and command not in ("fdeny", "fallow", "fsend", "exec", "eval") and command not in variable[acc]: variable[acc].append(command) if mode == "allow": var.add_allow_acc(acc, command) else: var.add_deny_acc(acc, command) elif command in variable[acc]: variable[acc].remove(command) if mode == "allow": var.remove_allow_acc(acc, command) else: var.remove_deny_acc(acc, command) if acc in variable and variable[acc]: msg = "\u0002{0}\u0002 (Account: {1}) is now {2} the following {3}commands: {4}{5}.".format( data[0], acc, "allowed" if mode == "allow" else "denied", "special " if mode == "allow" else "", botconfig.CMD_CHAR, ", {0}".format(botconfig.CMD_CHAR).join(variable[acc])) else: if acc in variable: del variable[acc] msg = "\u0002{0}\u0002 (Account: {1}) is no longer {2} commands.".format(data[0], acc, "allowed any special" if mode == 'allow' else "denied any") elif var.ACCOUNTS_ONLY: msg = "Error: \u0002{0}\u0002 is not logged in to NickServ.".format(data[0]) else: 
if mode == "allow": variable = var.ALLOW else: variable = var.DENY if len(data) == 1: # List commands for a specific hostmask if cloak in variable: msg = "\u0002{0}\u0002 (Host: {1}) is {2} the following {3}commands: {4}.".format( data[0], cloak, "allowed" if mode == "allow" else "denied", "special " if mode == "allow" else "", ", ".join(variable[cloak])) else: msg = "\u0002{0}\u0002 (Host: {1}) is not {2} commands.".format(data[0], cloak, "allowed any special" if mode == "allow" else "denied any") else: if cloak not in variable: variable[cloak] = [] commands = data[1:] for command in commands: #add or remove commands one at a time to a specific hostmask if "-*" in commands: # Remove all for cmd in variable[cloak]: if mode == "allow": var.remove_allow(cloak, cmd) else: var.remove_deny(cloak, cmd) del variable[cloak] break if command[0] == '-': #starting with - removes rem = True command = command[1:] else: rem = False if command.startswith(botconfig.CMD_CHAR): #ignore command prefix command = command[len(botconfig.CMD_CHAR):] if not rem: if command in COMMANDS and command not in ("fdeny", "fallow", "fsend", "exec", "eval") and command not in variable[cloak]: variable[cloak].append(command) if mode == "allow": var.add_allow(cloak, command) else: var.add_deny(cloak, command) elif command in variable[cloak]: variable[cloak].remove(command) if mode == "allow": var.remove_allow(cloak, command) else: var.remove_deny(cloak, command) if cloak in variable and variable[cloak]: msg = "\u0002{0}\u0002 (Host: {1}) is now {2} the following {3}commands: {4}{5}.".format( data[0], cloak, "allowed" if mode == "allow" else "denied", "special " if mode == "allow" else "", botconfig.CMD_CHAR, ", {0}".format(botconfig.CMD_CHAR).join(variable[cloak])) else: if cloak in variable: del variable[cloak] msg = "\u0002{0}\u0002 (Host: {1}) is no longer {2} commands.".format(data[0], cloak, "allowed any special" if mode == "allow" else "denied any") else: users_to_cmds = {} if not var.DISABLE_ACCOUNTS: if mode == "allow": variable = var.ALLOW_ACCOUNTS else: variable = var.DENY_ACCOUNTS if variable: for acc, varied in variable.items(): if var.ACCOUNTS_ONLY: users_to_cmds[acc] = sorted(varied, key=str.lower) else: users_to_cmds[acc+" (Account)"] = sorted(varied, key=str.lower) if not var.ACCOUNTS_ONLY: if mode == "allow": variable = var.ALLOW else: variable = var.DENY if variable: for cloak, varied in variable.items(): if var.DISABLE_ACCOUNTS: users_to_cmds[cloak] = sorted(varied, key=str.lower) else: users_to_cmds[cloak+" (Host)"] = sorted(varied, key=str.lower) if not users_to_cmds: # Deny or Allow list is empty msg = "Nobody is {0} commands.".format("allowed any special" if mode == "allow" else "denied any") else: if opts["cmds"] or opts["cmd"]: cmds_to_users = defaultdict(list) for user in sorted(users_to_cmds, key=str.lower): for cmd in users_to_cmds[user]: cmds_to_users[cmd].append(user) if opts["cmd"]: cmd = opts["cmd"] users = cmds_to_users[cmd] if cmd not in COMMANDS: if chan == nick: pm(cli, nick, "That command does not exist.") else: cli.notice(nick, "That command does not exist.") return if users: msg = "\u0002{0}{1}\u0002 is {2} to the following people: {3}".format( botconfig.CMD_CHAR, opts["cmd"], "allowed" if mode == "allow" else "denied", ", ".join(users)) else: msg = "\u0002{0}{1}\u0002 is not {2} to any special people.".format( botconfig.CMD_CHAR, opts["cmd"], "allowed" if mode == "allow" else "denied") else: msg = "{0}: {1}".format("Allowed" if mode == "allow" else "Denied", "; ".join("\u0002{0}\u0002 
({1})".format( cmd, ", ".join(users)) for cmd, users in sorted(cmds_to_users.items(), key=lambda t: t[0].lower()))) else: msg = "{0}: {1}".format("Allowed" if mode == "allow" else "Denied", "; ".join("\u0002{0}\u0002 ({1})".format( user, ", ".join(cmds)) for user, cmds in sorted(users_to_cmds.items(), key=lambda t: t[0].lower()))) if msg: if data: tokens = msg.split() if ((data[0] == acc and tokens[1] == "({0})".format(acc)) or (data[0] == cloak and tokens[1] == "({0})".format(cloak))): # Don't show the cloak/account twice. msg = " ".join((tokens[0], " ".join(tokens[2:]))) msg = var.break_long_message(msg.split("; "), "; ") if chan == nick: pm(cli, nick, msg) else: cli.msg(chan, msg) @cmd("fallow", admin_only=True, pm=True) def fallow(cli, nick, chan, rest): """Allow someone to use an admin command.""" allow_deny(cli, nick, chan, rest, "allow") @cmd("fdeny", admin_only=True, pm=True) def fdeny(cli, nick, chan, rest): """Deny someone from using a command.""" allow_deny(cli, nick, chan, rest, "deny") @cmd("wait", "w", playing=True, phases=("join",)) def wait(cli, nick, chan, rest): """Increases the wait time until !start can be used.""" pl = var.list_players() if chan != botconfig.CHANNEL: return with var.WAIT_TB_LOCK: wait_check_time = time.time() var.WAIT_TB_TOKENS += (wait_check_time - var.WAIT_TB_LAST) / var.WAIT_TB_DELAY var.WAIT_TB_LAST = wait_check_time var.WAIT_TB_TOKENS = min(var.WAIT_TB_TOKENS, var.WAIT_TB_BURST) now = datetime.now() if ((var.LAST_WAIT and nick in var.LAST_WAIT and var.LAST_WAIT[nick] + timedelta(seconds=var.WAIT_RATE_LIMIT) > now) or var.WAIT_TB_TOKENS < 1): cli.notice(nick, ("This command is rate-limited. Please wait a while " "before using it again.")) return var.LAST_WAIT[nick] = now var.WAIT_TB_TOKENS -= 1 if now > var.CAN_START_TIME: var.CAN_START_TIME = now + timedelta(seconds=var.EXTRA_WAIT) else: var.CAN_START_TIME += timedelta(seconds=var.EXTRA_WAIT) cli.msg(chan, ("\u0002{0}\u0002 increased the wait time by "+ "{1} seconds.").format(nick, var.EXTRA_WAIT)) @cmd("fwait", admin_only=True, phases=("join",)) def fwait(cli, nick, chan, rest): """Forces an increase (or decrease) in wait time. Can be used with a number of seconds to wait.""" pl = var.list_players() rest = re.split(" +", rest.strip(), 1)[0] if rest and (rest.isdigit() or (rest[0] == "-" and rest[1:].isdigit())): extra = int(rest) else: extra = var.EXTRA_WAIT now = datetime.now() extra = max(-900, min(900, extra)) if now > var.CAN_START_TIME: var.CAN_START_TIME = now + timedelta(seconds=extra) else: var.CAN_START_TIME += timedelta(seconds=extra) cli.msg(chan, ("\u0002{0}\u0002 forcibly {2}creased the wait time by {1} " "second{3}.").format(nick, abs(extra), "in" if extra >= 0 else "de", "s" if extra != 1 else "")) @cmd("fstop", admin_only=True, phases=("join", "day", "night")) def reset_game(cli, nick, chan, rest): """Forces the game to stop.""" if nick == "<stderr>": cli.msg(botconfig.CHANNEL, "Game stopped due to error.") else: cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 has forced the game to stop.".format(nick)) if var.PHASE != "join": stop_game(cli) else: pl = var.list_players() reset_modes_timers(cli) reset() cli.msg(botconfig.CHANNEL, "PING! 
{0}".format(" ".join(pl))) @cmd("rules", pm=True) def show_rules(cli, nick, chan, rest): """Displays the rules.""" if (var.PHASE in ("day", "night") and nick not in var.list_players()) and chan != botconfig.CHANNEL: cli.notice(nick, var.RULES) return cli.msg(chan, var.RULES) @cmd("help", raw_nick=True, pm=True) def get_help(cli, rnick, chan, rest): """Gets help.""" nick, mode, user, cloak = parse_nick(rnick) fns = [] rest = rest.strip().replace(botconfig.CMD_CHAR, "", 1).lower() splitted = re.split(" +", rest, 1) cname = splitted.pop(0) rest = splitted[0] if splitted else "" if cname: if cname in COMMANDS.keys(): for fn in COMMANDS[cname]: if fn.__doc__: got = True if callable(fn.__doc__): msg = botconfig.CMD_CHAR+cname+": "+fn.__doc__(rest) else: msg = botconfig.CMD_CHAR+cname+": "+fn.__doc__ if chan == nick: pm(cli, nick, msg) else: cli.notice(nick, msg) return else: got = False continue else: if got: return elif chan == nick: pm(cli, nick, "Documentation for this command is not available.") else: cli.notice(nick, "Documentation for this command is not available.") elif chan == nick: pm(cli, nick, "Command not found.") else: cli.notice(nick, "Command not found.") return # if command was not found, or if no command was given: for name, fn in COMMANDS.items(): if (name and not fn[0].admin_only and not fn[0].owner_only and name not in fn[0].aliases and fn[0].chan): fns.append("{0}{1}{0}".format("\u0002", name)) afns = [] if is_admin(nick, cloak): for name, fn in COMMANDS.items(): if fn[0].admin_only and name not in fn[0].aliases: afns.append("{0}{1}{0}".format("\u0002", name)) fns.sort() # Output commands in alphabetical order if chan == nick: pm(cli, nick, "Commands: {0}".format(var.break_long_message(fns, ", "))) else: cli.notice(nick, "Commands: {0}".format(var.break_long_message(fns, ", "))) if afns: afns.sort() if chan == nick: pm(cli, nick, "Admin Commands: {0}".format(var.break_long_message(afns, ", "))) else: cli.notice(nick, "Admin Commands: {0}".format(var.break_long_message(afns, ", "))) @cmd("wiki", pm=True) def wiki(cli, nick, chan, rest): """Prints information on roles from the wiki.""" # no arguments, just print a link to the wiki if not rest: cli.msg(chan, "https://github.com/lykoss/lykos/wiki") return try: page = urllib.request.urlopen("https://raw.githubusercontent.com/wiki/lykoss/lykos/Home.md", timeout=2).read().decode("ascii", errors="replace") except (urllib.error.URLError, socket.timeout): cli.notice(nick, "Request to https://github.com/lykoss/lykos/wiki timed out.") return if not page: cli.notice(nick, "Could not open https://github.com/lykoss/lykos/wiki") return query = re.escape(rest.strip()) # look for exact match first, then for a partial match match = re.search(r"^##+ ({0})$\r?\n\r?\n^(.*)$".format(query), page, re.MULTILINE + re.IGNORECASE) if not match: match = re.search(r"^##+ ({0}.*)$\r?\n\r?\n^(.*)$".format(query), page, re.MULTILINE + re.IGNORECASE) if not match: cli.notice(nick, "Could not find information on that role in https://github.com/lykoss/lykos/wiki") return # wiki links only have lowercase ascii chars, and spaces are replaced with a dash wikilink = "https://github.com/lykoss/lykos/wiki#{0}".format("".join( x.lower() for x in match.group(1).replace(" ", "-") if x in string.ascii_letters+"-")) if nick == chan: pm(cli, nick, wikilink) pm(cli, nick, var.break_long_message(match.group(2).split())) else: cli.msg(chan, wikilink) cli.notice(nick, var.break_long_message(match.group(2).split())) @hook("invite") def on_invite(cli, raw_nick, something, 
chan): if chan == botconfig.CHANNEL: cli.join(chan) return # No questions (nick, _, _, cloak) = parse_nick(raw_nick) if is_admin(nick, cloak): cli.join(chan) # Allows the bot to be present in any channel debuglog(nick, "INVITE", chan, display=True) else: pm(cli, parse_nick(nick)[0], "You are not an admin.") @cmd("fpart", raw_nick=True, admin_only=True, pm=True) def fpart(cli, rnick, chan, rest): """Makes the bot forcibly leave a channel.""" nick = parse_nick(rnick)[0] if nick == chan: rest = rest.split() if not rest: pm(cli, nick, "Usage: fpart <channel>") return if rest[0] == botconfig.CHANNEL: pm(cli, nick, "No, that won't be allowed.") return chan = rest[0] pm(cli, nick, "Leaving "+ chan) if chan == botconfig.CHANNEL: cli.notice(nick, "No, that won't be allowed.") return cli.part(chan) @cmd("admins", "ops", pm=True) def show_admins(cli, nick, chan, rest): """Pings the admins that are available.""" admins = [] pl = var.list_players() if (chan != nick and var.LAST_ADMINS and var.LAST_ADMINS + timedelta(seconds=var.ADMINS_RATE_LIMIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. Please wait a while " "before using it again.")) return if chan != nick or (var.PHASE in ("day", "night") or nick in pl): var.LAST_ADMINS = datetime.now() if var.ADMIN_PINGING: return var.ADMIN_PINGING = True @hook("whoreply", hookid=4) def on_whoreply(cli, server, _, chan, __, cloak, ___, user, status, ____): if not var.ADMIN_PINGING: return if is_admin(user) and "G" not in status and user != botconfig.NICK: admins.append(user) @hook("endofwho", hookid=4) def show(*args): if not var.ADMIN_PINGING: return admins.sort(key=str.lower) msg = "Available admins: " + ", ".join(admins) if chan == nick: pm(cli, nick, msg) elif var.PHASE in ("day", "night") and nick not in pl: cli.notice(nick, msg) else: cli.msg(chan, msg) hook.unhook(4) var.ADMIN_PINGING = False if nick == chan: cli.who(botconfig.CHANNEL) else: cli.who(chan) @cmd("coin", pm=True) def coin(cli, nick, chan, rest): """It's a bad idea to base any decisions on this command.""" if var.PHASE in ("day", "night") and nick not in var.list_players() and chan == botconfig.CHANNEL: cli.notice(nick, "You may not use this command right now.") return cli.msg(chan, "\2{0}\2 tosses a coin into the air...".format(nick)) coin = random.choice(("heads", "tails")) specialty = random.randrange(0,10) if specialty == 0: coin = "its side" if specialty == 1: coin = botconfig.NICK cmsg = "The coin lands on \2{0}\2.".format(coin) cli.msg(chan, cmsg) @cmd("pony", pm=True) def pony(cli, nick, chan, rest): """For entertaining bronies.""" if var.PHASE in ("day", "night") and nick not in var.list_players() and chan == botconfig.CHANNEL: cli.notice(nick, "You may not use this command right now.") return cli.msg(chan, "\2{0}\2 tosses a pony into the air...".format(nick)) pony = random.choice(("hoof", "plot")) cmsg = "The pony lands on \2{0}\2.".format(pony) cli.msg(chan, cmsg) @cmd("time", pm=True, phases=("join", "day", "night")) def timeleft(cli, nick, chan, rest): """Returns the time left until the next day/night transition.""" if (chan != nick and var.LAST_TIME and var.LAST_TIME + timedelta(seconds=var.TIME_RATE_LIMIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. 
Please wait a while " "before using it again.")) return if chan != nick: var.LAST_TIME = datetime.now() if var.PHASE == "join": dur = int((var.CAN_START_TIME - datetime.now()).total_seconds()) msg = None if dur > 1: msg = "There are \u0002{0}\u0002 seconds remaining until the game may be started.".format(dur) elif dur == 1: msg = "There is \u00021\u0002 second remaining until the game may be started." if msg is not None: if nick == chan: pm(cli, nick, msg) else: cli.msg(chan, msg) if var.PHASE in var.TIMERS: remaining = timeleft_internal(var.PHASE) if var.PHASE == "day": what = "sunset" elif var.PHASE == "night": what = "sunrise" elif var.PHASE == "join": what = "the game is canceled if it's not started" msg = "There is \u0002{0[0]:0>2}:{0[1]:0>2}\u0002 remaining until {1}.".format(divmod(remaining, 60), what) else: msg = "{0} timers are currently disabled.".format(var.PHASE.capitalize()) if nick == chan: pm(cli, nick, msg) elif nick not in var.list_players() and var.PHASE not in ("none", "join"): cli.notice(nick, msg) else: cli.msg(chan, msg) def timeleft_internal(phase): return int((var.TIMERS[phase][1] + var.TIMERS[phase][2]) - time.time()) if phase in var.TIMERS else -1 @cmd("roles", pm=True) def listroles(cli, nick, chan, rest): """Displays which roles are enabled at a certain number of players.""" old = {} msg = [] index = 0 lpl = len(var.list_players()) + len(var.DEAD) roleindex = var.ROLE_INDEX roleguide = var.ROLE_GUIDE for r in var.ROLE_GUIDE.keys(): old[r] = 0 rest = re.split(" +", rest.strip(), 1) #message if this game mode has been disabled if (not rest[0] or rest[0].isdigit()) and var.GAME_MODES[var.CURRENT_GAMEMODE.name][4]: msg.append("{0}: {1}roles is disabled for the {2} game mode.".format(nick, botconfig.CMD_CHAR, var.CURRENT_GAMEMODE.name)) rest = [] roleindex = {} #prepend player count if called without any arguments elif not rest[0] and lpl > 0: msg.append("{0}: There {1} \u0002{2}\u0002 playing.".format(nick, "is" if lpl == 1 else "are", lpl)) if var.PHASE in ["night", "day"]: msg.append("Using the {0} game mode.".format(var.CURRENT_GAMEMODE.name)) rest = [str(lpl)] #read game mode to get roles for elif rest[0] and not rest[0].isdigit(): gamemode = rest[0] if gamemode not in var.GAME_MODES.keys(): gamemode, _ = complete_match(rest[0], var.GAME_MODES.keys() - ["roles"]) if gamemode in var.GAME_MODES.keys() and gamemode != "roles" and not var.GAME_MODES[gamemode][4]: mode = var.GAME_MODES[gamemode][0]() if hasattr(mode, "ROLE_INDEX") and hasattr(mode, "ROLE_GUIDE"): roleindex = mode.ROLE_INDEX roleguide = mode.ROLE_GUIDE elif gamemode == "default" and "ROLE_INDEX" in var.ORIGINAL_SETTINGS and "ROLE_GUIDE" in var.ORIGINAL_SETTINGS: roleindex = var.ORIGINAL_SETTINGS["ROLE_INDEX"] roleguide = var.ORIGINAL_SETTINGS["ROLE_GUIDE"] rest.pop(0) else: if gamemode in var.GAME_MODES and var.GAME_MODES[gamemode][4]: msg.append("{0}: {1}roles is disabled for the {2} game mode.".format(nick, botconfig.CMD_CHAR, gamemode)) else: msg.append("{0}: {1} is not a valid game mode.".format(nick, rest[0])) rest = [] roleindex = {} #number of players to print the game mode for if rest and rest[0].isdigit(): index = int(rest[0]) for i in range(len(roleindex)-1, -1, -1): if roleindex[i] <= index: index = roleindex[i] break #special ordering roleguide = [(role, roleguide[role]) for role in var.role_order()] for i, num in enumerate(roleindex): #getting the roles at a specific player count if index and num > index: break msg.append("{0}[{1}]{0}".format("\u0002" if num <= lpl else "", str(num))) 
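        # collect only the roles whose counts changed since the previous tier; 'old' holds the prior counts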
roles = [] for role, amount in roleguide: direction = 1 if amount[i] > old[role] else -1 for j in range(old[role], amount[i], direction): temp = "{0}{1}".format("-" if direction == -1 else "", role) if direction == 1 and j+1 > 1: temp += "({0})".format(j+1) elif j > 1: temp += "({0})".format(j) roles.append(temp) old[role] = amount[i] msg.append(", ".join(roles)) if not msg: msg = ["No roles are defined for {0}p games.".format(index)] msg = " ".join(msg) if chan == nick: pm(cli, nick, msg) elif nick not in var.list_players() and var.PHASE not in ("none", "join"): cli.notice(nick, msg) else: cli.msg(chan, msg) @cmd("myrole", pm=True, phases=("day", "night")) def myrole(cli, nick, chan, rest): """Reminds you of your current role.""" #special case vengeful ghost (that hasn't been driven away) if nick in var.VENGEFUL_GHOSTS.keys() and var.VENGEFUL_GHOSTS[nick][0] != "!": pm(cli, nick, "You are a \u0002vengeful ghost\u0002 who is against the \u0002{0}\u0002.".format(var.VENGEFUL_GHOSTS[nick])) return ps = var.list_players() if nick not in ps: cli.notice(nick, "You're not currently playing.") return role = var.get_role(nick) if role == "time lord": role = "villager" elif role in ("amnesiac", "vengeful ghost"): role = var.DEFAULT_ROLE an = "n" if role.startswith(("a", "e", "i", "o", "u")) else "" pm(cli, nick, "You are a{0} \u0002{1}\u0002.".format(an, role)) # Remind shamans what totem they have if role in var.TOTEM_ORDER and role != "crazed shaman" and var.PHASE == "night" and nick not in var.SHAMANS: pm(cli, nick, "You have the \u0002{0}\u0002 totem.".format(var.TOTEMS[nick])) # Remind clone who they have cloned if role == "clone" and nick in var.CLONED: pm(cli, nick, "You are cloning \u0002{0}\u0002.".format(var.CLONED[nick])) # Give minion the wolf list they would have recieved night one if role == "minion": wolves = [] for wolfrole in var.WOLF_ROLES: for player in var.ORIGINAL_ROLES[wolfrole]: wolves.append(player) pm(cli, nick, "Original wolves: " + ", ".join(wolves)) # Remind turncoats of their side if role == "turncoat": pm(cli, nick, "Current side: \u0002{0}\u0002.".format(var.TURNCOATS.get(nick, "none"))) # Check for gun/bullets if nick not in var.ROLES["amnesiac"] and nick in var.GUNNERS and var.GUNNERS[nick]: role = "gunner" if nick in var.ROLES["sharpshooter"]: role = "sharpshooter" if var.GUNNERS[nick] == 1: pm(cli, nick, "You are a {0} and have a \u0002gun\u0002 with {1} {2}.".format(role, var.GUNNERS[nick], "bullet")) else: pm(cli, nick, "You are a {0} and have a \u0002gun\u0002 with {1} {2}.".format(role, var.GUNNERS[nick], "bullets")) elif nick in var.WOLF_GUNNERS and var.WOLF_GUNNERS[nick]: if var.WOLF_GUNNERS[nick] == 1: pm(cli, nick, "You have a \u0002gun\u0002 with {0} {1}.".format(var.WOLF_GUNNERS[nick], "bullet")) else: pm(cli, nick, "You have a \u0002gun\u0002 with {0} {1}.".format(var.WOLF_GUNNERS[nick], "bullets")) # Check assassin if nick in var.ROLES["assassin"] and nick not in var.ROLES["amnesiac"]: pm(cli, nick, "You are an \u0002assassin\u0002{0}.".format(" and targeting {0}".format(var.TARGETED[nick]) if nick in var.TARGETED else "")) # Remind player if they were bitten by alpha wolf if nick in var.BITTEN and role not in var.WOLF_ROLES: pm(cli, nick, "You were bitten by an alpha wolf and have \u0002{0} night{1}\u0002 until your transformation.".format(max(var.BITTEN[nick], 0), "" if var.BITTEN[nick] == 1 else "s")) # Remind lovers of each other if nick in ps and nick in var.LOVERS: message = "You are \u0002in love\u0002 with " lovers = 
sorted(list(set(var.LOVERS[nick]))) if len(lovers) == 1: message += lovers[0] elif len(lovers) == 2: message += lovers[0] + " and " + lovers[1] else: message += ", ".join(lovers[:-1]) + ", and " + lovers[-1] message += "." pm(cli, nick, message) @cmd("faftergame", admin_only=True, raw_nick=True, pm=True) def aftergame(cli, rawnick, chan, rest): """Schedule a command to be run after the current game.""" nick = parse_nick(rawnick)[0] if not rest.strip(): cli.notice(nick, "Incorrect syntax for this command.") return rst = re.split(" +", rest) cmd = rst.pop(0).lower().replace(botconfig.CMD_CHAR, "", 1).strip() if cmd in COMMANDS.keys(): def do_action(): for fn in COMMANDS[cmd]: fn.aftergame = True fn.caller(cli, rawnick, botconfig.CHANNEL if fn.chan else nick, " ".join(rst)) fn.aftergame = False else: cli.notice(nick, "That command was not found.") return if var.PHASE == "none": do_action() return fullcmd = cmd if rst: fullcmd += " " fullcmd += " ".join(rst) cli.msg(botconfig.CHANNEL, ("The command \u0002{0}\u0002 has been scheduled to run "+ "after this game by \u0002{1}\u0002.").format(fullcmd, nick)) var.AFTER_FLASTGAME = do_action @cmd("flastgame", admin_only=True, raw_nick=True, pm=True) def flastgame(cli, rawnick, chan, rest): """Disables starting or joining a game, and optionally schedules a command to run after the current game ends.""" nick, _, __, cloak = parse_nick(rawnick) chan = botconfig.CHANNEL if var.PHASE != "join": for decor in (COMMANDS.get("join", []) + COMMANDS.get("start", [])): decor(lambda *spam: cli.msg(chan, "This command has been disabled by an admin.")) cli.msg(chan, "Creating a new game has now been disabled by \u0002{0}\u0002.".format(nick)) var.ADMIN_TO_PING = nick if rest.strip(): aftergame.func(cli, rawnick, botconfig.CHANNEL, rest) @cmd("gamestats", "gstats", pm=True) def game_stats(cli, nick, chan, rest): """Gets the game stats for a given game size or lists game totals for all game sizes if no game size is given.""" if (chan != nick and var.LAST_GSTATS and var.GSTATS_RATE_LIMIT and var.LAST_GSTATS + timedelta(seconds=var.GSTATS_RATE_LIMIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. 
Please wait a while " "before using it again.")) return if chan != nick: var.LAST_GSTATS = datetime.now() if var.PHASE not in ("none", "join"): cli.notice(nick, "Wait until the game is over to view stats.") return gamemode = var.CURRENT_GAMEMODE.name gamesize = None rest = rest.split() # Check for gamemode if len(rest) and not rest[0].isdigit(): gamemode = rest[0] if gamemode not in var.GAME_MODES.keys(): gamemode, _ = complete_match(gamemode, var.GAME_MODES.keys()) if not gamemode: cli.notice(nick, "{0} is not a valid game mode".format(rest[0])) return rest.pop(0) # Check for invalid input if len(rest) and rest[0].isdigit(): gamesize = int(rest[0]) if gamesize > var.GAME_MODES[gamemode][2] or gamesize < var.GAME_MODES[gamemode][1]: cli.notice(nick, "Please enter an integer between "+\ "{0} and {1}.".format(var.GAME_MODES[gamemode][1], var.GAME_MODES[gamemode][2])) return # List all games sizes and totals if no size is given if not gamesize: if chan == nick: pm(cli, nick, var.get_game_totals(gamemode)) else: cli.msg(chan, var.get_game_totals(gamemode)) else: # Attempt to find game stats for the given game size if chan == nick: pm(cli, nick, var.get_game_stats(gamemode, gamesize)) else: cli.msg(chan, var.get_game_stats(gamemode, gamesize)) @cmd("playerstats", "pstats", "player", "p", pm=True) def player_stats(cli, nick, chan, rest): """Gets the stats for the given player and role or a list of role totals if no role is given.""" if (chan != nick and var.LAST_PSTATS and var.PSTATS_RATE_LIMIT and var.LAST_PSTATS + timedelta(seconds=var.PSTATS_RATE_LIMIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. Please wait a while " "before using it again.")) return if chan != nick: var.LAST_PSTATS = datetime.now() params = rest.split() # Check if we have enough parameters if params: user = params[0] else: user = nick # Find the player's account if possible luser = user.lower() lusers = {k.lower(): v for k, v in var.USERS.items()} if luser in lusers and not var.DISABLE_ACCOUNTS: acc = lusers[luser]["account"] if acc == "*": if luser == nick.lower(): cli.notice(nick, "You are not logged in to NickServ.") else: cli.notice(nick, user + " is not logged in to NickServ.") return else: acc = user # List the player's total games for all roles if no role is given if len(params) < 2: message = var.get_player_totals(acc) if chan == nick: pm(cli, nick, message) else: cli.notice(nick, message) else: role = " ".join(params[1:]) # Attempt to find the player's stats message = var.get_player_stats(acc, role) if chan == nick: pm(cli, nick, message) elif var.PHASE not in ("none", "join"): cli.notice(nick, message) else: cli.msg(chan, message) @cmd("mystats", "m", pm=True) def my_stats(cli, nick, chan, rest): """Get your own stats.""" rest = rest.split() player_stats.func(cli, nick, chan, " ".join([nick] + rest)) @cmd("game", playing=True, phases=("join",)) def game(cli, nick, chan, rest): """Vote for a game mode to be picked.""" if rest: gamemode = rest.lower().split()[0] else: gamemodes = ", ".join("\u0002{0}\u0002".format(gamemode) if len(var.list_players()) in range(var.GAME_MODES[gamemode][1], var.GAME_MODES[gamemode][2]+1) else gamemode for gamemode in var.GAME_MODES.keys() if gamemode != "roles") cli.notice(nick, "No game mode specified. 
Available game modes: " + gamemodes) return if var.FGAMED: cli.notice(nick, "A game mode has already been forced by an admin.") return if gamemode not in var.GAME_MODES.keys(): match, _ = complete_match(gamemode, var.GAME_MODES.keys() - ["roles"]) if not match: cli.notice(nick, "\u0002{0}\u0002 is not a valid game mode.".format(gamemode)) return gamemode = match if gamemode != "roles": var.GAMEMODE_VOTES[nick] = gamemode cli.msg(chan, "\u0002{0}\u0002 votes for the \u0002{1}\u0002 game mode.".format(nick, gamemode)) else: cli.notice(nick, "You can't vote for that game mode.") def game_help(args=""): return "Votes to make a specific game mode more likely. Available game mode setters: " +\ ", ".join("\u0002{0}\u0002".format(gamemode) if len(var.list_players()) in range(var.GAME_MODES[gamemode][1], var.GAME_MODES[gamemode][2]+1) else gamemode for gamemode in var.GAME_MODES.keys() if gamemode != "roles") game.__doc__ = game_help @cmd("vote", "v", pm=True, phases=("join", "day")) def vote(cli, nick, chan, rest): """Vote for a game mode if no game is running, or for a player to be lynched.""" if rest: if var.PHASE == "join" and chan != nick: return game.caller(cli, nick, chan, rest) else: return lynch.caller(cli, nick, chan, rest) else: return show_votes.caller(cli, nick, chan, rest) @cmd("fpull", admin_only=True, pm=True) def fpull(cli, nick, chan, rest): """Pulls from the repository to update the bot.""" commands = ["git fetch", "git rebase --stat --preserve-merges"] for command in commands: child = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = child.communicate() ret = child.returncode for line in (out + err).splitlines(): if chan == nick: cli.msg(nick, line.decode("utf-8")) else: pm(cli, nick, line.decode("utf-8")) if ret != 0: if ret < 0: cause = "signal" ret *= -1 else: cause = "status" if chan == nick: cli.msg(nick, "Process %s exited with %s %d" % (command, cause, ret)) else: pm(cli, nick, "Process %s exited with %s %d" % (command, cause, ret)) @cmd("fsend", admin_only=True, pm=True) def fsend(cli, nick, chan, rest): """Forcibly send raw IRC commands to the server.""" cli.send(rest) def _say(cli, raw_nick, rest, command, action=False): (nick, _, _, cloak) = parse_nick(raw_nick) rest = rest.split(" ", 1) if len(rest) < 2: pm(cli, nick, "Usage: {0}{1} <target> <message>".format( botconfig.CMD_CHAR, command)) return (target, message) = rest if not is_admin(nick, cloak): if nick not in var.USERS: pm(cli, nick, "You have to be in {0} to use this command.".format( botconfig.CHANNEL)) return if rest[0] != botconfig.CHANNEL: pm(cli, nick, ("You do not have permission to message this user " "or channel.")) return if action: message = "\u0001ACTION {0}\u0001".format(message) cli.send("PRIVMSG {0} :{1}".format(target, message)) @cmd("fsay", admin_only=True, raw_nick=True, pm=True) def fsay(cli, raw_nick, chan, rest): """Talk through the bot as a normal message.""" _say(cli, raw_nick, rest, "fsay") @cmd("fact", "fdo", "fme", admin_only=True, raw_nick=True, pm=True) def fact(cli, raw_nick, chan, rest): """Act through the bot as an action.""" _say(cli, raw_nick, rest, "fact", action=True) before_debug_mode_commands = list(COMMANDS.keys()) if botconfig.DEBUG_MODE or botconfig.ALLOWED_NORMAL_MODE_COMMANDS: @cmd("eval", owner_only=True, pm=True) def pyeval(cli, nick, chan, rest): """Evaluate a Python expression.""" try: a = str(eval(rest)) if len(a) < 500: cli.msg(chan, a) else: cli.msg(chan, a[:500]) except Exception as e: cli.msg(chan, 
str(type(e))+":"+str(e)) @cmd("exec", owner_only=True, pm=True) def py(cli, nick, chan, rest): """Execute arbitrary Python code.""" try: exec(rest) except Exception as e: cli.msg(chan, str(type(e))+":"+str(e)) @cmd("revealroles", admin_only=True, pm=True, phases=("day", "night")) def revealroles(cli, nick, chan, rest): """Reveal role information.""" def is_authorized(): # if allowed in normal games, restrict it so that it can only be used by dead players and # non-players (don't allow active vengeful ghosts either). # also don't allow in-channel (e.g. make it pm only) if botconfig.DEBUG_MODE: return True pl = var.list_players() + [vg for (vg, against) in var.VENGEFUL_GHOSTS.items() if not against.startswith("!")] if nick in pl: return False if nick in var.USERS and var.USERS[nick]["account"] in [var.USERS[player]["account"] for player in pl if player in var.USERS]: return False if nick in var.USERS and var.USERS[nick]["cloak"] in [var.USERS[player]["cloak"] for player in pl if player in var.USERS]: return False return True if not is_authorized(): if chan == nick: pm(cli, nick, "You are not allowed to use that command right now.") else: cli.notice(nick, "You are not allowed to use that command right now.") return output = [] for role in var.role_order(): if role in var.ROLES and var.ROLES[role]: # make a copy since this list is modified nicks = copy.copy(var.ROLES[role]) # go through each nickname, adding extra info if necessary for i in range(len(nicks)): special_case = [] nickname = nicks[i] if role == "assassin" and nickname in var.TARGETED: special_case.append("targeting {0}".format(var.TARGETED[nickname])) elif role in var.TOTEM_ORDER and nickname in var.TOTEMS: if nickname in var.SHAMANS: special_case.append("giving {0} totem to {1}".format(var.TOTEMS[nickname], var.SHAMANS[nickname])) elif var.PHASE == "night": special_case.append("has {0} totem".format(var.TOTEMS[nickname])) elif nickname in var.LASTGIVEN: special_case.append("gave {0} totem to {1}".format(var.TOTEMS[nickname], var.LASTGIVEN[nickname])) elif role == "clone" and nickname in var.CLONED: special_case.append("cloning {0}".format(var.CLONED[nickname])) elif role == "amnesiac" and nickname in var.AMNESIAC_ROLES: special_case.append("will become {0}".format(var.AMNESIAC_ROLES[nickname])) # print how many bullets normal gunners have elif (role == "gunner" or role == "sharpshooter") and nickname in var.GUNNERS: special_case.append("{0} bullet{1}".format(var.GUNNERS[nickname], "" if var.GUNNERS[nickname] == 1 else "s")) elif role == "turncoat" and nickname in var.TURNCOATS: special_case.append("currently with \u0002{0}\u0002".format(var.TURNCOATS[nickname][0]) if var.TURNCOATS[nickname][0] != "none" else "not currently on any side") # print out how many bullets wolf gunners have if nickname in var.WOLF_GUNNERS and role not in var.TEMPLATE_RESTRICTIONS: special_case.append("wolf gunner with {0} bullet{1}".format(var.WOLF_GUNNERS[nickname], "" if var.WOLF_GUNNERS[nickname] == 1 else "s")) if nickname not in var.ORIGINAL_ROLES[role] and role not in var.TEMPLATE_RESTRICTIONS: for old_role in var.role_order(): # order doesn't matter here, but oh well if nickname in var.ORIGINAL_ROLES[old_role] and nickname not in var.ROLES[old_role]: special_case.append("was {0}".format(old_role)) break if special_case: nicks[i] = "".join((nicks[i], " (", ", ".join(special_case), ")")) output.append("\u0002{0}\u0002: {1}".format(role, ", ".join(nicks))) # print out lovers too done = {} lovers = [] for lover1, llist in var.LOVERS.items(): for 
lover2 in llist: # check if already said the pairing if (lover1 in done and lover2 in done[lover1]) or (lover2 in done and lover1 in done[lover2]): continue lovers.append("{0}/{1}".format(lover1, lover2)) if lover1 in done: done[lover1].append(lover2) else: done[lover1] = [lover2] if len(lovers) == 1 or len(lovers) == 2: output.append("\u0002lovers\u0002: {0}".format(" and ".join(lovers))) elif len(lovers) > 2: output.append("\u0002lovers\u0002: {0}, and {1}".format(", ".join(lovers[0:-1]), lovers[-1])) # print out vengeful ghosts, also vengeful ghosts that were driven away by 'retribution' totem if var.VENGEFUL_GHOSTS: output.append("\u0002dead vengeful ghost\u0002: {0}".format(", ".join("{0} ({1}against {2})".format( ghost, team.startswith("!") and "driven away, " or "", team.lstrip("!")) for (ghost, team) in var.VENGEFUL_GHOSTS.items()))) #show bitten users + days until turning if var.BITTEN and next((days for (nickname,days) in var.BITTEN.items() if days > 0 or var.get_role(nickname) not in var.WOLF_ROLES), None) is not None: output.append("\u0002bitten\u0002: {0}".format(", ".join("{0} ({1} night{2} until transformation)".format( nickname, max(days, 0), "" if days == 1 else "s") for (nickname,days) in var.BITTEN.items() if days > 0 or var.get_role(nickname) not in var.WOLF_ROLES))) #show who got immunized if var.IMMUNIZED: output.append("\u0002immunized\u0002: {0}".format(", ".join(var.IMMUNIZED))) # get charmed players if var.CHARMED | var.TOBECHARMED: output.append("\u0002charmed players\u0002: {0}".format(", ".join(var.CHARMED | var.TOBECHARMED))) if chan == nick: pm(cli, nick, var.break_long_message(output, " | ")) else: if botconfig.DEBUG_MODE: cli.msg(chan, var.break_long_message(output, " | ")) else: cli.notice(nick, var.break_long_message(output, " | ")) @cmd("fgame", admin_only=True, raw_nick=True, phases=("join",)) def fgame(cli, nick, chan, rest): """Force a certain game mode to be picked. 
Disable voting for game modes upon use.""" nick = parse_nick(nick)[0] pl = var.list_players() if nick not in pl and not is_admin(nick): cli.notice(nick, "You're not currently playing.") return if rest: gamemode = rest.strip().lower() parts = gamemode.split("=", 1) if len(parts) > 1: gamemode, modeargs = parts else: gamemode = parts[0] modeargs = None if gamemode not in var.GAME_MODES.keys(): gamemode = gamemode.split()[0] gamemode, _ = complete_match(gamemode, var.GAME_MODES.keys()) if not gamemode: cli.notice(nick, "\u0002{0}\u0002 is not a valid game mode.".format(rest)) return parts[0] = gamemode if cgamemode(cli, "=".join(parts)): cli.msg(chan, ("\u0002{0}\u0002 has changed the game settings " "successfully.").format(nick)) var.FGAMED = True else: cli.notice(nick, fgame.__doc__()) def fgame_help(args=""): args = args.strip() if not args: return "Available game mode setters: " + ", ".join(var.GAME_MODES.keys()) elif args in var.GAME_MODES.keys(): return var.GAME_MODES[args][0].__doc__ or "Game mode {0} has no doc string".format(args) else: return "Game mode setter \u0002{0}\u0002 not found.".format(args) fgame.__doc__ = fgame_help # DO NOT MAKE THIS A PMCOMMAND ALSO @cmd("force", admin_only=True) def force(cli, nick, chan, rest): """Force a certain player to use a specific command.""" rst = re.split(" +",rest) if len(rst) < 2: cli.msg(chan, "The syntax is incorrect.") return who = rst.pop(0).strip() if not who or who == botconfig.NICK: cli.msg(chan, "That won't work.") return if who == "*": who = var.list_players() else: if not is_fake_nick(who): ul = list(var.USERS.keys()) ull = [u.lower() for u in ul] if who.lower() not in ull: cli.msg(chan, "This can only be done on players in the channel or fake nicks.") return else: who = [ul[ull.index(who.lower())]] else: who = [who] comm = rst.pop(0).lower().replace(botconfig.CMD_CHAR, "", 1) if comm in COMMANDS and not COMMANDS[comm][0].owner_only: for fn in COMMANDS[comm]: if fn.owner_only: continue if fn.admin_only and nick in var.USERS and not is_admin(nick): # Not a full admin cli.notice(nick, "Only full admins can force an admin-only command.") continue for user in who: if fn.chan: fn.caller(cli, user, chan, " ".join(rst)) else: fn.caller(cli, user, user, " ".join(rst)) cli.msg(chan, "Operation successful.") else: cli.msg(chan, "That command was not found.") @cmd("rforce", admin_only=True) def rforce(cli, nick, chan, rest): """Force all players of a given role to perform a certain action.""" rst = re.split(" +",rest) if len(rst) < 2: cli.msg(chan, "The syntax is incorrect.") return who = rst.pop(0).strip().lower() who = who.replace("_", " ") if who == "*": # wildcard match tgt = var.list_players() elif (who not in var.ROLES or not var.ROLES[who]) and (who != "gunner" or var.PHASE in ("none", "join")): cli.msg(chan, nick+": invalid role") return elif who == "gunner": tgt = list(var.GUNNERS.keys()) else: tgt = var.ROLES[who] comm = rst.pop(0).lower().replace(botconfig.CMD_CHAR, "", 1) if comm in COMMANDS and not COMMANDS[comm][0].owner_only: for fn in COMMANDS[comm]: if fn.owner_only: continue if fn.admin_only and nick in var.USERS and not is_admin(nick): # Not a full admin cli.notice(nick, "Only full admins can force an admin-only command.") continue for user in tgt[:]: if fn.chan: fn.caller(cli, user, chan, " ".join(rst)) else: fn.caller(cli, user, user, " ".join(rst)) cli.msg(chan, "Operation successful.") else: cli.msg(chan, "That command was not found.") @cmd("frole", admin_only=True) def frole(cli, nick, chan, rest): """Change the role 
or template of a player.""" rst = re.split(" +",rest) if len(rst) < 2: cli.msg(chan, "The syntax is incorrect.") return who = rst.pop(0).strip() rol = " ".join(rst).strip() ul = list(var.USERS.keys()) ull = [u.lower() for u in ul] if who.lower() not in ull: if not is_fake_nick(who): cli.msg(chan, "Could not be done.") cli.msg(chan, "The target needs to be in this channel or a fake name.") return if not is_fake_nick(who): who = ul[ull.index(who.lower())] if who == botconfig.NICK or not who: cli.msg(chan, "No.") return pl = var.list_players() rolargs = re.split("\s*=\s*", rol, 1) rol = rolargs[0] if rol[1:] in var.TEMPLATE_RESTRICTIONS.keys(): addrem = rol[0] rol = rol[1:] is_gunner = (rol == "gunner" or rol == "sharpshooter") if addrem == "+" and who not in var.ROLES[rol]: if is_gunner: if len(rolargs) == 2 and rolargs[1].isdigit(): if len(rolargs[1]) < 7: var.GUNNERS[who] = int(rolargs[1]) var.WOLF_GUNNERS[who] = int(rolargs[1]) else: var.GUNNERS[who] = 999 var.WOLF_GUNNERS[who] = 999 elif rol == "gunner": var.GUNNERS[who] = math.ceil(var.SHOTS_MULTIPLIER * len(pl)) else: var.GUNNERS[who] = math.ceil(var.SHARPSHOOTER_MULTIPLIER * len(pl)) if who not in pl: var.ROLES[var.DEFAULT_ROLE].append(who) if not is_fake_nick(who): cli.mode(chan, "+v", who) cli.msg(chan, "Added default role ({0}) because only a template was specified for a new player.".format(var.DEFAULT_ROLE)) var.ROLES[rol].append(who) elif addrem == "-" and who in var.ROLES[rol]: var.ROLES[rol].remove(who) if is_gunner and who in var.GUNNERS: del var.GUNNERS[who] else: cli.msg(chan, "Improper template modification.") return elif rol in var.TEMPLATE_RESTRICTIONS.keys(): cli.msg(chan, "Please specify \u0002+{0}\u0002 or \u0002-{0}\u0002 to add/remove this template.".format(rol)) return elif rol in var.ROLES.keys(): if who in pl: oldrole = var.get_role(who) var.ROLES[oldrole].remove(who) if rol in var.TOTEM_ORDER: if len(rolargs) == 2: var.TOTEMS[who] = rolargs[1] else: max_totems = {} for sham in var.TOTEM_ORDER: max_totems[sham] = 0 for ix in range(len(var.TOTEM_ORDER)): for c in var.TOTEM_CHANCES.values(): max_totems[var.TOTEM_ORDER[ix]] += c[ix] for shaman in var.list_players(var.TOTEM_ORDER): indx = var.TOTEM_ORDER.index(rol) target = 0 rand = random.random() * max_totems[var.TOTEM_ORDER[indx]] for t in var.TOTEM_CHANCES.keys(): target += var.TOTEM_CHANCES[t][indx] if rand <= target: var.TOTEMS[shaman] = t break var.ROLES[rol].append(who) if not is_fake_nick(who): cli.mode(chan, "+v", who) else: cli.msg(chan, "Not a valid role.") return cli.msg(chan, "Operation successful.") if var.PHASE not in ("none", "join"): # default stats determination does not work if we're mucking with !frole if var.STATS_TYPE == "default": var.ORIGINAL_SETTINGS["STATS_TYPE"] = var.STATS_TYPE var.STATS_TYPE = "accurate" cli.msg(chan, "!stats type changed to accurate due to use of !frole.") chk_win(cli) if botconfig.ALLOWED_NORMAL_MODE_COMMANDS and not botconfig.DEBUG_MODE: for comd in list(COMMANDS.keys()): if (comd not in before_debug_mode_commands and comd not in botconfig.ALLOWED_NORMAL_MODE_COMMANDS): del COMMANDS[comd] # vim: set expandtab:sw=4:ts=4:
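
# A minimal, self-contained sketch (illustrative only, not used by the bot) of
# the token-bucket rate limiting that wait() applies through the
# var.WAIT_TB_BURST/DELAY/TOKENS/LAST settings: refill the bucket in proportion
# to elapsed time, cap it at the burst size, then spend one token per use. The
# class name is hypothetical.
class _TokenBucketSketch(object):
    def __init__(self, burst, delay):
        self.burst = burst          # bucket capacity (maximum stored tokens)
        self.delay = delay          # seconds needed to regenerate one token
        self.tokens = burst         # start with a full bucket
        self.last = time.time()     # timestamp of the last refill

    def spend(self):
        """Try to consume one token; returns False when rate-limited."""
        now = time.time()
        # refill proportionally to the time elapsed, never past capacity
        self.tokens = min(self.tokens + (now - self.last) / self.delay, self.burst)
        self.last = now
        if self.tokens < 1:
            return False
        self.tokens -= 1
        return True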
Agent-Isai/lykos
src/wolfgame.py
Python
bsd-2-clause
397,683
[ "VisIt", "exciting" ]
1473d8cfc2b948f92bdbe19e5a1e1ac6a7514dd194322501cb3e8f2ff02228f9
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File :    dirac-wms-job-submit
# Author :  Stuart Paterson
########################################################################
"""
  Submit jobs to DIRAC WMS
"""
__RCSID__ = "$Id$"

import DIRAC
from DIRAC.Core.Base import Script
import os

Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... JDL ...' % Script.scriptName,
                                     'Arguments:',
                                     '  JDL:      Path to JDL file' ] ) )

Script.registerSwitch( "f:", "File=", "Writes job ids to file <value>" )
Script.registerSwitch( "r:", "UseJobRepo=", "Use the job repository" )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()

if len( args ) < 1:
  Script.showHelp()

from DIRAC.Interfaces.API.Dirac import Dirac

unprocessed_switches = Script.getUnprocessedSwitches()
use_repo = False
repo_name = ""
for sw, value in unprocessed_switches:
  if sw.lower() in [ "r", "usejobrepo" ]:
    use_repo = True
    repo_name = value
    repo_name = repo_name.replace( ".cfg", ".repo" )
dirac = Dirac( use_repo, repo_name )
exitCode = 0
errorList = []

jFile = None
for sw, value in unprocessed_switches:
  if sw.lower() in ( 'f', 'file' ):
    if os.path.isfile( value ):
      print 'Appending job ids to existing logfile: %s' % value
      if not os.access( value, os.W_OK ):
        print 'Existing logfile %s must be writable by user.' % value
    jFile = open( value, 'a' )

for jdl in args:

  result = dirac.submitJob( jdl )
  if result['OK']:
    print 'JobID = %s' % ( result['Value'] )
    if jFile is not None:
      # parametric jobs
      if isinstance( result['Value'], list ):
        jFile.write( '\n'.join( str( p ) for p in result['Value'] ) )
        jFile.write( '\n' )
      else:
        jFile.write( str( result['Value'] ) + '\n' )
  else:
    errorList.append( ( jdl, result['Message'] ) )
    exitCode = 2

if jFile is not None:
  jFile.close()

for error in errorList:
  print "ERROR %s: %s" % error

DIRAC.exit( exitCode )
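
# Illustrative invocations (assumed typical usage; the flag spellings come from
# the registerSwitch calls above, the file names are hypothetical):
#   dirac-wms-job-submit job.jdl
#   dirac-wms-job-submit -f submitted.ids job1.jdl job2.jdl
#   dirac-wms-job-submit -r myrepo.cfg job.jdl    # ids tracked in myrepo.repo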
andresailer/DIRAC
Interfaces/scripts/dirac-wms-job-submit.py
Python
gpl-3.0
2,233
[ "DIRAC" ]
d8aea18cf7b91b60ef1d0d82d3ba0740f1904f777e3632881e2d9262424bf215
""" Callback when a staging operation is finished """ __RCSID__ = "$Id$" from DIRAC import S_OK from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient class StagingCallback(OperationHandlerBase): """ .. class:: StagingCallback This performs the 'Done' callback to a job waiting for the staging to finish Currently, we cannot store the JobID in the field reserved in the Request, because then our crapy finalization system will try updating the job (minor) status So we store the job ID in the Argument field of operation """ def __init__(self, operation=None, csPath=None): """ c'tor :param Operation operation: an Operation instance :param str csPath: CS path for this handler """ super(StagingCallback, self).__init__(operation, csPath) def __call__(self): """ update the job status """ # # decode arguments jobID = self.operation.Arguments self.log.info("Performing callback to job %s" % jobID) res = JobStateUpdateClient().updateJobFromStager(jobID, 'Done') if not res['OK']: self.log.error("Error performing the callback to the job", res) return res self.operation.Status = "Done" self.log.info("Callback from staging done") return S_OK()
chaen/DIRAC
DataManagementSystem/Agent/RequestOperations/StagingCallback.py
Python
gpl-3.0
1,385
[ "DIRAC" ]
f360920c81a58671d9e3577fe9e52b4b8e69151af6a24b3ee6caf61bbd7747af
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys from ansible import constants as C ANSIBLE_COLOR=True if C.ANSIBLE_NOCOLOR: ANSIBLE_COLOR=False elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty(): ANSIBLE_COLOR=False else: try: import curses curses.setupterm() if curses.tigetnum('colors') < 0: ANSIBLE_COLOR=False except ImportError: # curses library was not found pass except curses.error: # curses returns an error (e.g. could not find terminal) ANSIBLE_COLOR=False if C.ANSIBLE_FORCE_COLOR: ANSIBLE_COLOR=True # --- begin "pretty" # # pretty - A miniature library that provides a Python print and stdout # wrapper that makes colored terminal text easier to use (e.g. without # having to mess around with ANSI escape sequences). This code is public # domain - there is no license except that you must leave this header. # # Copyright (C) 2008 Brian Nez <thedude at bri1 dot com> # # http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/ codeCodes = { 'black': u'0;30', 'bright gray': u'0;37', 'blue': u'0;34', 'white': u'1;37', 'green': u'0;32', 'bright blue': u'1;34', 'cyan': u'0;36', 'bright green': u'1;32', 'red': u'0;31', 'bright cyan': u'1;36', 'purple': u'0;35', 'bright red': u'1;31', 'yellow': u'0;33', 'bright purple': u'1;35', 'dark gray': u'1;30', 'bright yellow': u'1;33', 'magenta': u'0;35', 'bright magenta': u'1;35', 'normal': u'0' , } def stringc(text, color): """String in color.""" if ANSIBLE_COLOR: return u"\033[%sm%s\033[0m" % (codeCodes[color], text) else: return text # --- end "pretty" def colorize(lead, num, color): """ Print 'lead' = 'num' in 'color' """ s = u"%s=%-4s" % (lead, str(num)) if num != 0 and ANSIBLE_COLOR and color is not None: s = stringc(s, color) return s def hostcolor(host, stats, color=True): if ANSIBLE_COLOR and color: if stats['failures'] != 0 or stats['unreachable'] != 0: return u"%-37s" % stringc(host, 'red') elif stats['changed'] != 0: return u"%-37s" % stringc(host, 'yellow') else: return u"%-37s" % stringc(host, 'green') return u"%-26s" % host
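
# A small, illustrative self-check (not part of Ansible); it only runs when the
# module is executed directly, so importing this file is unaffected.
if __name__ == '__main__':
    print(stringc(u'ok', 'green'))
    print(colorize(u'changed', 3, 'yellow'))
    print(hostcolor(u'web01', {'failures': 0, 'unreachable': 0, 'changed': 1}))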
wkeeling/ansible
lib/ansible/utils/color.py
Python
gpl-3.0
3,144
[ "Brian" ]
3ab137f3d27be3caed81fc37d9d2899ee16ee7c4e5b111f5464b7af71c99d5aa
import os import btor from btor import BtoRGUIClasses as ui from btor.BtoRTypes import * import cgkit import cgkit.rmshader import cgkit.cgtypes import cgkit.quadrics from cgkit import ri as ri from cgkit import ribexport as export import Blender import xml.dom.minidom import new import math from sets import Set import sys import StringIO import protocols import random import md5 import traceback import re # interfaces class IProperty(protocols.Interface): def getValue(): pass def setValue(): pass class IPropertyEditor(protocols.Interface): def getValue(): """" get the value of the property """ def setValue(): """ set the value of the property """ class IObjectAdapter(protocols.Interface): def render(): """ Render the object""" def getInfo(): """ Get the object's information hash. """ def initObjectData(): """ Initialize the object's BtoR data """ def loadData(): """ Load object's data """ def saveData(): """ Save object's Data """ class IObjectUI(protocols.Interface): def getEditor(): """ Get the object's editor panel. """ def setExportCallback(self, func): """ Assign an export function """ class IShaderParamUI(protocols.Interface): def getVariance(): """ Returns a shader param that has been modified according to rules specified by the user """ def getEditor(): """ returns the UI for the shader parameter """ class IShaderParamEditor(protocols.Interface): def setValue(): """ Set the value of the parameter """ def getValue(): """ get the value of the parameter """ class Property: protocols.advise(instancesProvide=[IProperty]) height = 27 # should work for most def __init__(self, value, xml = None): self.value = value self.saveable = True self.labelWidth = 0 self.editorWidth = 0 self.isRenderable = False def setHeight(self, height): self.height = height def setWidth(self, width): self.width = width def setCustomWidth(self, width): self.editorWidth = width[0] self.labelWidth = width[1] def setName(self, name): self.name = name def getName(self): return self.name def setValue(self, value): self.value = value def setDefault(self, default): self.default = default def getValue(self): return self.value def toXML(self, xml): xmlProp = xml.createElement("property") xmlProp.setAttribute("type", type(self.getValue()).__name__) xmlProp.setAttribute("value", str(self.getValue())) #print "set property of type ", type(self.value), " to ", type(str(self.value).__name__) return xmlProp # interface for complex properties def getEditor(self): return self.value.obj.getEditor() def getStrValue(self): return self.value.obj.getStrValue() def registerCallback(self, signal, function): # this is a pass-through to maintain abstraction #if self.value.__dict__.has_key("registerCallback"): # this will bypass problems with missing values, till I fix my implementation to full provide an interface print "setting callback for shader!" 
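		# note: assumes the wrapped value object exposes registerCallback (see the commented-out guard above)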
		self.value.registerCallback(signal, function)

# Properties
class StringProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[str])
	pass

class IntProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[int])
	pass

class FloatProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[float])
	pass

class ColorProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[list])
	def toXML(self, xml):
		xmlProp = xml.createElement("property")
		xmlProp.setAttribute("type", type(self.getValue()).__name__)
		xmlProp.setAttribute("red", str(self.getValue()[0]))
		xmlProp.setAttribute("green", str(self.getValue()[1]))
		xmlProp.setAttribute("blue", str(self.getValue()[2]))
		#print "set property of type ", type(self.value), " to ", type(str(self.value).__name__)
		print self.getValue()
		return xmlProp

class DictProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[dict])
	def __init__(self, value):
		self.labelWidth = 0
		self.editorWidth = 0
		self.saveable = True
		# what do I do here? I want this to actually be converted to a menu
		# thus
		# sort the array keys
		self.valueDict = value
		self.keyList = value.keys()
		self.value = value[self.keyList[0]] # get the first value in the dictionary
		# sort the key list, but decide how to sort it based on the type of the values provided.
		if isinstance(self.value, int):
			newList = []
			for key in self.keyList:
				newList.append(int(key))
			newList.sort()
			self.keyList = []
			for key in newList:
				self.keyList.append(str(key))
		elif isinstance(self.value, float):
			newList = []
			for key in self.keyList:
				newList.append(float(key))
			newList.sort()
			self.keyList = []
			for key in newList:
				self.keyList.append(str(key))
		else:
			self.keyList.sort() # only strings, so sort accordingly

	def getKeys(self):
		return self.keyList # keylist may or may not be sorted

	def getValueByKey(self, key):
		return self.valueDict[key]

class BooleanProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[bool])
	pass

class ShaderProperty(Property):
	protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[BtoRShaderType])
	def __init__(self, value, xml = None):
		sdict = globals()
		self.settings = sdict["instBtoRSettings"]
		Property.__init__(self, value, xml = xml)
		self.saveable = False

	def initShader(self, useXML = False, xml = None, shaderName = None, parmList = None, shaderFileName = None):
		# this should always have a shader, so
		s_type = self.value.getObject().s_type
		#print "Initializing a ", s_type, " shader"
		try:
			if self.settings.use_slparams:
				shaderPath = self.settings.getShaderSearchPaths()[0]
				initialized = True
				if useXML:
					shader = cgkit.rmshader.RMShader(xml.getAttribute("path").encode("ascii"))
					# this would actually be a problem. I'm not always 100% sure that I'll have a shader source file, and so must rely
					# also on the shader name to get this in cases where a custom shader was entered.
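					# recover each saved <Param> element and re-apply it to the freshly loaded shader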
parms = xml.getElementsByTagName("Param") self.populateShaderParams(parms, shader, initialized) else: shader = cgkit.rmshader.RMShader(shaderFileName) self.populateShaderParamsList(parmList, shader, initialized) else: initialized = False shader = cgkit.rmshader.RMShader(shaderName) self.populateShaderParamsList(parmList, shader, initialized) shader = btor.BtoRMain.GenericShader(shader, s_type, self) except: traceback.print_exc() shader = btor.BtoRMain.GenericShader(None, s_type, self) self.value = BtoRShaderType(shader) self.editor.setValue(shader.getStrValue()) # I should probably update the property editor here def populateShaderParams(self, parms, shader, initialized): print parms for parm in parms: convtypes = {"float":"double", "string":"string", "color":"vec3", "point":"vec3", "vector":"vec3", "normal":"vec3", "matrix":"mat4"} p_type = parm.getAttribute("type") p_name = parm.getAttribute("name") if p_type == "float": parm_value = float(parm.getAttribute("value")) elif p_type == "string": parm_value = parm.getAttribute("value").encode("ascii") elif p_type == "color": parm_value = cgkit.cgtypes.vec3(float(parm.getAttribute("red")), float(parm.getAttribute("green")), float(parm.getAttribute("blue"))) elif p_type in ["normal", "vector", "point"]: parm_value = cgkit.cgtypes.vec3(float(parm.getAttribute("x")), float(parm.getAttribute("y")), float(parm.getAttribute("z"))) elif p_type == "matrix": mat_value = [] sep = "_" index = 0 for x in range(4): for y in range(4): mat_value.append(float(parmNode.getAttribute(sep.join(["value", index])))) index = index + 1 parm_value = cgkit.cgtypes.mat4(mat_value[0], mat_value[1], mat_value[2], mat_value[3], mat_value[4], mat_value[5], mat_value[6], mat_value[7], mat_value[8], mat_value[9], mat_value[10], mat_value[11], mat_value[12], mat_value[13], mat_value[14], mat_value[15]) if initialized == False: shader.declare(p_name, type=convtypes[p_type], default=parm_value) # shader.createSlot(p_name, convtypes[p_type], None, parm_value) # Here we set the default value to the parameters incoming value. # print "Assigning parameter value ", p_name, " = ", parm_value # and set the value setattr(shader, p_name, parm_value) def populateShaderParamsList(self, params, shader, initialized): convtypes = {"float":"double", "str":"string", "vec3":"vec3", "matrix":"mat4"} val = "" # print dir(shader) for key in params.keys(): param = params[key] if isinstance(param, list): if len(param) == 3: # color, vector, normal, or point ptype = "vec3" val = cgkit.cgtypes.vec3(float(param[0]), float(param[1]), float(param[2])) elif len(param) == 16: # matrix val = cgkit.cgtypes.mat4(float(param[0]), float(param[2]), float(param[2]), float(param[3]), float(param[4]), float(param[5]), float(param[6]), float(param[7]), float(param[8]), float(param[9]), float(param[10]), float(param[11]), float(param[12]), float(param[13]), float(param[14]), float(param[15])) elif isinstance(param, float) or isinstance(param, int): ptype = "float" val = float(param) elif isinstance(param, str): ptype = "string" val = param if initialized == False: shader.declare(key,type=convtypes[ptype], default=val ) # declare the shader here! # shader.createSlot(key + "_slot", convtypes[ptype], None, val) # Here we set the default value to the parameters incoming value... 
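			# declare() only happens for an uninitialized shader; the setattr() below then assigns the incoming value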
# print "Setting shader parameter ", key, " to ", val # and set the value # print key, ", ", val if param != None: setattr(shader, key, val) def showEditor(self): # print self.value.getObject() self.value.getObject().showEditor() class MaterialProperty(Property): protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[BtoRMaterialType]) height = 65 pass def getValue(self): return self.editor.value.getValue() class VectorProperty(Property): protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[cgkit.cgtypes.vec3]) pass class RotationProperty(Property): protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[BtoRRotationType]) pass class MatrixProperty(Property): protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[cgkit.cgtypes.mat4]) height = 95 pass class CustomRIBProperty(Property): protocols.advise(instancesProvide=[IProperty], asAdapterForTypes=[BtoRCustomRIB]) height = 20 pass # Property Editors class PropertyEditor: # this needs no interface declaration, since all this is doing is providing a baseclass fontsize = 'small' def __init__(self, property, suppressLabel = False): self.property = property self.height = self.property.height if self.property.labelWidth > 0: pWidth = self.property.editorWidth lWidth = self.property.labelWidth else: pWidth = self.property.width self.editor = ui.Panel(0, 0, pWidth, self.height, "", "", None, False) self.editor.hasHeader = False self.editor.shadowed = False self.editor.normaColor = [128, 128, 128, 0] self.editor.hoverColor = [128, 128, 128, 0] self.editor.outlined = True self.editor.cornermask = 0 self.editor.outlined = True self.editor.cornermask = 0 if not suppressLabel: self.label = ui.Label(2, 3, self.property.getName(), self.property.getName(), self.editor, True, fontsize = self.fontsize) self.label.fontsize = 'small' self.func = None def setValue(self, value): self.property.setValue(value) self.value.setValue(value) def setParent(self, parent): self.editor.parent = parent self.editor.invalid = True def setPropertyCallback(self, func): self.func = func def getValue(self): return self.property.getValue() def updateValue(self, obj): if type(obj) == ui.TextField: if obj.type == "int": self.property.setValue(int(obj.getValue())) elif obj.type == "float": self.property.setValue(float(obj.getValue())) else: self.property.setValue(obj.getValue()) if self.func != None: self.func() # invoke the update function for this property def getEditor(self): return self.editor class BasicPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[IntProperty, FloatProperty, BtoRFloatParam]) """ A basic property, a label and a text box """ def __init__(self, property): PropertyEditor.__init__(self, property) width = property.width height = property.height self.value = ui.TextField(width / 2, 0, width / 2, height, self.property.getName(), self.property.getValue(), self.editor, True, fontsize = self.fontsize) self.value.registerCallback("update", self.updateValue) # protocols.declareAdapter(BasicPropertyEditor, [IPropertyEditor], forTypes=[StringProperty, IntProperty, FloatProperty]) class StringPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[StringProperty, BtoRStringParam], factoryMethod="routeProperty") """ A basic property, a label and a text box """ def __init__(self, property): sdict = globals() # self.scene = sdict["instBtoRSceneSettings"] PropertyEditor.__init__(self, property) width = property.width height = 
property.height self.value = ui.TextField(width / 2, 0, width / 2, height, self.property.getName(), self.property.getValue(), self.editor, True, fontsize = self.fontsize) self.value.registerCallback("update", self.updateValue) self.value.registerCallback("right_click", self.showAssets) def showAssets(self, obj): #self.scene.showAssets(self) print "yo!" @classmethod def routeProperty(cls, obj): if isinstance(obj, StringProperty): #print "Returning a normal string property!" # init self, I'm a string return cls(obj) else: name = obj.getName() value = obj.getValue() #print "Finding editor for ", name # color space or space property most likely, react accordingly spaceMatch = re.compile("space|proj", re.I) fileMatch = re.compile("tex|map|name|refl", re.I) if spaceMatch.search(name): # extend this to provide a nice list if value in ColorSpacePropertyEditor.ColorSpaces: return ColorSpacePropertyEditor(obj) elif value in SpacePropertyEditor.Spaces: return SpacePropertyEditor(obj) elif value in ProjectionPropertyEditor.Projections: return ProjectionPropertyEditor(obj) if fileMatch.search(name): return FilePropertyEditor(obj) else: return cls(obj) class FilePropertyEditor(PropertyEditor): # file property editor should have access to the global asset list. # environment & shadow maps should apply to the *assigned* object # lightsources are slightly weird because they need to be both local and global (for AO purposes) # environment maps are actually global namely because of multiple assignment protocols.advise(instancesProvide=[IPropertyEditor]) def __init__(self, property): # remember to add an override button to all custom types! sdict = globals() self.evt_manager = sdict["instBtoREvtManager"] PropertyEditor.__init__(self, property) width = self.property.width height = self.property.height self.value = ui.TextField(width / 2, 0, width / 2 - height, height, self.property.getName(), self.property.getStrValue(), self.editor, True, fontsize = self.fontsize) # self.value.Enabled = False butX = self.value.x + self.value.width + 1 self.triggerButton = ui.Button(butX, 0, height, height, "...", "...", 'small', self.editor, True) self.triggerButton.shadowed = False self.triggerButton.registerCallback("release", self.browse) # if isinstance(property, StringProperty): self.value.registerCallback("update", self.updateValue) # for (this( property editor, I should set a back reference so I can update the text field with the shader when it's initialized self.property.editor = self self.value.registerCallback("right_click", self.showAssets) def showAssets(self, obj): #self.scene.showAssets(self) print "yo!" 
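	# --- illustrative sketch (editor's note, not original BtoR code) ---
	# routeProperty() above dispatches on the parameter *name*: names matching
	# "space|proj" get a menu-style editor, names matching "tex|map|name|refl"
	# get this file editor, and everything else falls back to a text field.
	# A minimal standalone version of that heuristic, assuming only stdlib re:
	#
	#   import re
	#   def pick_editor(name):
	#       if re.search("space|proj", name, re.I):
	#           return "menu"
	#       if re.search("tex|map|name|refl", name, re.I):
	#           return "file"
	#       return "text"
	#   assert pick_editor("shadowmap") == "file"
	#   assert pick_editor("coordspace") == "menu"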
	def browse(self, button):
		""" Browser """
		# stub: no file browser is wired to the trigger button here yet
		pass

class PathPropertyEditor(PropertyEditor):
	""" Directory Browser """
	def __init__(self, property):
		sdict = globals()
		self.evt_manager = sdict["instBtoREvtManager"]
		# was missing: without the base __init__, self.property and self.editor
		# were never created and the attribute lookups below raised AttributeError
		PropertyEditor.__init__(self, property)
		width = self.property.width
		height = self.property.height
		self.value = ui.TextField(0, 0, width - 25, height, self.property.getName(), self.property.getStrValue(), self.editor, True, fontsize = self.fontsize)
		self.triggerButton = ui.Button(width - 25, 0, 25, height, "...", "...", 'small', self.editor, True)
		self.triggerButton.shadowed = False
		self.triggerButton.registerCallback("release", self.browsePath)
		self.value.registerCallback("update", self.updateValue)
		self.property.editor = self
	def browsePath(self, button):
		Blender.Window.FileSelector(self.select, 'Choose any file')
	def select(self, file):
		path = os.path.dirname(file)
		self.value.setValue(path)
		self.updateValue(path)

class SpacePropertyEditor(PropertyEditor):
	Spaces = ['current', 'object', 'shader', 'world', 'camera', 'screen', 'raster', 'NDC']
	def __init__(self, property):
		PropertyEditor.__init__(self, property)
		width = property.width
		height = property.height
		self.value = ui.Menu(width / 2, 2, width / 2, height - 4, self.property.getName(), self.Spaces, self.editor, True, fontsize = self.fontsize)
		self.value.registerCallback("select", self.updateValue)
		self.value.setShadowed(False)
	def setValue(self, value):
		# menu editors need to be slightly different
		self.property.setValue(value)
		self.value.setValueString(value)
	def renameMenuItem(self, idx, name):
		self.value.renameElement(idx, name)

class ColorSpacePropertyEditor(PropertyEditor):
	ColorSpaces = ['rgb', 'hsv', 'hsl', 'YIQ', 'xyz', 'xyY']
	def __init__(self, property):
		PropertyEditor.__init__(self, property)
		width = property.width
		height = property.height
		self.value = ui.Menu(width / 2, 2, width / 2, height - 4, self.property.getName(), self.ColorSpaces, self.editor, True, fontsize = self.fontsize)
		self.value.setShadowed(False)
		self.value.registerCallback("select", self.updateValue)
	def setValue(self, value):
		# menu editors need to be slightly different
		self.property.setValue(value)
		self.value.setValueString(value)
	def renameMenuItem(self, idx, name):
		self.value.renameElement(idx, name)

class ProjectionPropertyEditor(PropertyEditor):
	Projections = ['st', 'planar', 'perspective', 'spherical', 'cylindrical']
	def __init__(self, property):
		PropertyEditor.__init__(self, property)
		width = property.width
		height = property.height
		self.value = ui.Menu(width / 2, 2, width / 2, height - 4, self.property.getName(), self.Projections, self.editor, True, fontsize = self.fontsize)
		self.value.registerCallback("select", self.updateValue)
		self.value.setShadowed(False)
	def setValue(self, value):
		# menu editors need to be slightly different
		self.property.setValue(value)
		self.value.setValueString(value)
	def renameMenuItem(self, idx, name):
		self.value.renameElement(idx, name)

class MenuPropertyEditor(PropertyEditor):
	protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[DictProperty])
	def __init__(self, property):
		PropertyEditor.__init__(self, property)
		width = self.property.width
		height = self.property.height
		menu = self.property.getKeys()
		defVal = None
		if "btor:default" in menu:
			defVal = self.property.getValueByKey("btor:default")
			# strip the default option out of the menu now
			menu.remove("btor:default")
		self.value = ui.Menu(width / 2, 2, width / 2, height - 4, self.property.getName(), menu, self.editor, True, fontsize = self.fontsize)
		self.value.registerCallback("select",
self.updateValue) self.value.setShadowed(False) self.property.setValue(self.value.getValue()) if defVal != None: self.value.setValueString(defVal) # assigns the default value def setValue(self, value): # menu editors need to be slightly different self.property.setValue(value) self.value.setValueString(value) def updateMenu(self, menu): # reinit the menu, but keep the selected indesx index = self.value.getSelectedIndex() self.value.re_init(menu) if len(self.value.elements) > index: self.value.setValue(0) else: self.value.setValue(index) def renameMenuItem(self, idx, name): self.value.renameElement(idx, name) #protocols.declareAdapter(MenuPropertyEditor, [IPropertyEditor], forTypes=[DictProperty]) class ColorPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[ColorProperty, BtoRColorParam]) def __init__(self, property): PropertyEditor.__init__(self, property) width = self.property.width height = self.property.height color = self.property.getValue() print color # I need 3 RGB values inc = (width / 2) / 4 self.R = ui.TextField((width / 2), 0, inc -1, height, "Red", color[0], self.editor, True) self.G = ui.TextField((width / 2) + inc, 0, inc -1, height, "Green", color[1], self.editor, True) self.B = ui.TextField((width / 2) + (inc * 2), 0, inc -1, height, "Blue", color[2], self.editor, True) self.colorButton = ui.ColorButton((width / 2) + (inc * 3), 0, inc - 4, height - 2, "Color", color, self.editor, True) self.colorButton.outlined = True self.R.registerCallback("update", self.updateColor) self.G.registerCallback("update", self.updateColor) self.B.registerCallback("update", self.updateColor) self.updateColor(None) self.colorButton.picker.registerCallback("ok", self.updateFields) self.value = self.colorButton def updateFields(self, color): self.R.setValue(float(float(color.value[0]) / 255)) self.G.setValue(float(float(color.value[1]) / 255)) self.B.setValue(float(float(color.value[2]) / 255)) self.property.setValue([float(float(color.value[0])/255), float(float(color.value[1]) / 255), float(float(color.value[2]) / 255)]) def updateColor(self, obj): # convert to RGB 255 r_s = float(self.R.getValue()) g_s = float(self.G.getValue()) b_s = float(self.B.getValue()) if float(r_s) > 1: r = int(r_s) else: r = float(r_s) * 255 if float(g_s) > 1: g = float(g_s) else: g = float(g_s) * 255 if float(b_s) > 1: b = float(b_s) else: b = float(b_s) * 255 rgb = [r, g, b, 255] self.colorButton.setValue(rgb) self.property.setValue([self.R.getValue(), self.G.getValue(), self.B.getValue()]) def setValue(self, color): print "Setting color", color self.R.setValue(color[0]) self.G.setValue(color[1]) self.B.setValue(color[2]) self.updateColor(None) # self.property.setValue([float(float(color.value[0])/255), float(float(color.value[1]) / 255), float(float(color.value[2]) / 255)]) #protocols.declareAdapter(ColorPropertyEditor, [IPropertyEditor], forTypes=[ColorProperty]) class BooleanPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[BooleanProperty]) def __init__(self, property): PropertyEditor.__init__(self, property) width = self.property.width height = self.property.height self.value = ui.CheckBox(width / 2 + ((width / 2) - 55), 5, "", " ", property.getValue(), self.editor, True, fontsize = self.fontsize) self.value.height = 15 self.value.x_offset = 2 self.value.y_offset = 0 #self.value.outlined = True self.value.registerCallback("release", self.updateValue) # again, the property should handle this 
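# --- illustrative sketch (editor's note, not original BtoR code) ---
# ColorPropertyEditor above keeps channels as floats in [0, 1] for the
# property, but the ui color button wants [0, 255]; updateColor() guesses
# which range a text field holds by checking "> 1". The same heuristic as a
# self-contained function:
#
#   def to255(c):
#       c = float(c)
#       return c * 255 if c <= 1 else c
#   assert to255("0.5") == 127.5
#   assert to255("128") == 128.0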
#protocols.declareAdapter(BooleanPropertyEditor, [IPropertyEditor], forTypes=[BooleanProperty]) class ShaderPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[ShaderProperty]) def __init__(self, property): sdict = globals() self.evt_manager = sdict["instBtoREvtManager"] PropertyEditor.__init__(self, property) width = self.property.width height = self.property.height self.value = ui.TextField(width / 2, 0, width / 2 - height, height, self.property.getName(), self.property.getStrValue(), self.editor, True, fontsize = self.fontsize) self.value.Enabled = False butX = self.value.x + self.value.width + 1 self.triggerButton = ui.Button(butX, 0, height, height, "...", "...", 'small', self.editor, True) self.triggerButton.shadowed = False self.triggerButton.registerCallback("release", self.showPropertyEditor) self.property.registerCallback("update", self.updateValue) # for (this( property editor, I should set a back reference so I can update the text field with the shader when it's initialized self.property.editor = self # now I need to set a callback to get the shader value when it's updated def showPropertyEditor(self, obj): # instead of doing this, I need to do self.property.showEditor() def updateValue(self, obj): print "Updating property!" self.setValue(obj.getShaderName()) def setValue(self, value): # self.property.setValue(value) self.value.setValue(value) class MaterialPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[MaterialProperty]) def __init__(self, property): sdict = globals() self.evt_manager = sdict["instBtoREvtManager"] self.materials = sdict["instBtoRMaterials"] self.property = property width = self.property.width self.height = self.property.height self.editor = ui.Button(0, 0, width, self.height, "Assigned Material:", "Assigned Material:", 'normal', None, False) self.editor.registerCallback("release", self.showMaterialSelector) self.editor.textlocation = 1 self.editor.shadowed = False #self.editor.normalColor = [128, 128, 128, 0] #self.editor.hoverColor = [128, 128, 128, 0] self.editor.outlined = True self.editor.cornermask = 0 self.editor.outlined = True self.editor.cornermask = 0 self.value = ui.Label(2, 25, "None Assigned:", "None Assigned:", self.editor, True, fontsize = self.fontsize) self.value.fontsize = 'small' self.value.transparent = True self.func = None width = self.property.width height = self.property.height self.property.editor = self def showPropertyEditor(self, obj): self.evt_manager.addElement(self.property.getEditor()) def setValue(self, material): #self.evt_manager.removeElement(self.mat_selector) # the button returned has the material name! # So all i need to do now is... if material != None and material != "None Assigned:": self.value.setValue(material) self.material = self.materials.getMaterial(material) # this should be the material name! 
self.editor.setTitle(material) self.editor.image = self.materials.getMaterial(material).image def updateValue(self, obj): if material != None: self.material = material if preInit == False: self.objData["material"] = material.name self.objEditor.materialButton.setTitle(material.name) self.objEditor.setImage(material.image) def setMaterial(self, material): if material != None: self.value.setValue(material.material.name) self.property.setValue(material.material.name) if self.editor.image == None: self.editor.image = ui.Image(150, 5, 56, 56, material.image, self.editor, False) else: self.editor.image = ui.Image(150, 5, 56, 56, material.image, self.editor, False) def showMaterialSelector(self, obj): """ Display a material selector window. """ # I should have loaded materials here, so let's do this. if self.materials.getMaterialCount() < 1: self.evt_manager.showConfirmDialog("No materials defined!", "You haven't defined any materials!", None, False) else: self.evt_manager.addElement(self.materials.getSelector()) def setImage(self, image): buttonImage = ui.Image(120, 5, 56, 56, image, self.materialButton, False) self.editor.image = buttonImage def selectMaterial(self, obj, matName = None): """ material selection callback """ self.evt_manager.removeElement(self.mat_selector) # the button returned has the material name! # So all i need to do now is... if matName != None: self.material = matName self.materialButton.setTitle(matName) self.materialButton.image = self.materials.getMaterial(matName).image else: self.material = obj.title # self.scene.object_data[self.objectName.getValue()]["material"] = obj.title # Assign the material to the object adapter # print "Assigned material ", self.scene.object_data[self.objectName.getValue()]["material"], " to ", self.objectName.getValue() self.materialButton.title = obj.title self.materialButton.image = ui.Image(120, 5, 56, 56, obj.image.image, self.materialButton, False) class VectorPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[VectorProperty, BtoRPointParam, BtoRVectorParam, BtoRNormalParam]) def __init__(self, property): PropertyEditor.__init__(self, property) width = property.width height = property.height start = width / 2 inc = (width / 2) / 3 - 1 self.x = ui.TextField(start, 0, inc - 1, height, "X", self.property.getValue()[0], self.editor, True, fontsize = self.fontsize) self.x.registerCallback("update", self.updateValue) start = start + inc self.y = ui.TextField(start, 0, inc - 1, height, "Y", self.property.getValue()[1], self.editor, True, fontsize = self.fontsize) self.y.registerCallback("update", self.updateValue) start = start + inc self.z = ui.TextField(start, 0, inc - 1, height, "Z", self.property.getValue()[2], self.editor, True, fontsize = self.fontsize) self.z.registerCallback("update", self.updateValue) def updateValue(self, obj): self.property.value = cgkit.cgtypes.vec3(float(self.x.getValue()), float(self.y.getValue()), float(self.z.getValue())) def setValue(self, value): self.x.setValue(value[0]) self.y.setValue(value[1]) self.z.setValue(value[2]) self.property.setValue(value) class RotationPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[RotationProperty]) def __init__(self, property): PropertyEditor.__init__(self, property) width = property.width height = property.height start = width / 2 inc = (width / 2) / 4 - 1 self.angle = ui.TextField(start, 0, inc - 1, height, "angle", self.property.getValue().getObject()[0], 
self.editor, True, fontsize = self.fontsize) self.angle.registerCallback("update", self.updateValue) start = start + inc self.x = ui.CheckBox(start + 1, 3, "X", "X",self.property.getValue().getObject()[1], self.editor, True, fontsize = self.fontsize) self.x.registerCallback("release", self.updateValue) start = start + inc self.x.elements[0].x = 12 self.y = ui.CheckBox(start + 1, 3, "Y", "Y", self.property.getValue().getObject()[2], self.editor, True, fontsize = self.fontsize) self.y.registerCallback("release", self.updateValue) start = start + inc self.y.elements[0].x = 12 self.z = ui.CheckBox(start + 1, 3, "Z", "Z", self.property.getValue().getObject()[3], self.editor, True, fontsize = self.fontsize) self.z.registerCallback("release", self.updateValue) self.z.elements[0].x = 12 def updateValue(self, obj): self.property.value.obj = [float(self.angle.getValue()), self.x.getValue(), self.y.getValue(), self.z.getValue()] class MatrixPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[MatrixProperty, BtoRMatrixParam]) def __init__(self, property): PropertyEditor.__init__(self, property) width = property.width height = property.height x = width / 2 self.table = ui.Table(x, 0, x, 65, "table", property.getValue(), self.editor, True) class CustomRIBPropertyEditor(PropertyEditor): protocols.advise(instancesProvide=[IPropertyEditor], asAdapterForTypes=[CustomRIBProperty]) def __init__(self, property): PropertyEditor.__init__(self, property) width = property.width height = property.height x = width / 4 self.value = ui.TextField(width / 2, 0, x, height, "", "No custom RIB", self.editor, True) self.value.Enabled = False self.trigger = ui.Button(x + 1, 0, x, height, "", "...", self.editor, True) # Object Adapters class ObjectAdapter: # baseclass for all Blender objects protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRLattice, BtoRArmature, BtoRBasicObject, BtoREmpty, BtoRWave]) def __init__(self, obj): """ initialize an object adapter based on the incoming object """ dict = globals() self.settings = dict["instBtoRSettings"] self.evt_manager = dict["instBtoREvtManager"] self.scene = dict["instBtoRSceneSettings"] self.materials = dict["instBtoRMaterials"] self.lighting = dict["instBtoRLightManager"] self.object = obj.obj # this is a BtoR object instance # why can't I use the objectUI protocol to get an appropriate editor? # let's do that! 
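		# --- illustrative note (editor's addition, not original BtoR code) ---
		# protocols.adapt(obj, IObjectUI) below returns whichever ObjectUI class
		# was registered via protocols.advise() for this BtoR object type, the
		# same adapter machinery used for properties and editors above. Sketch:
		#
		#   prop = protocols.adapt(5.0, IProperty)            # -> FloatProperty
		#   editor = protocols.adapt(prop, IPropertyEditor)   # -> BasicPropertyEditor
		#   objUI = protocols.adapt(some_btor_obj, IObjectUI) # hypothetical BtoR object
		#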
		self.objEditor = protocols.adapt(obj, IObjectUI)
		self.properties = self.objEditor.properties
		self.editors = self.objEditor.editors
		self.rendererAtts = self.objEditor.rendererAtts
		self.initObjectData() # initializes object data
		# self.objType = obj.obj.getType()
		self.changed = False
	def renderAttributes(self):
		# iterate renderer-specific attributes
		attributes = self.settings.getRendererAttributes()
		for attribute in attributes:
			render = False
			if attribute in self.rendererAtts:
				attArray = attribute.split(":")
				val = self.rendererAtts[attribute].getValue()
				print "Value is:", val
				print "Default is:", attributes[attribute][2]
				if val != attributes[attribute][2]: # not the default value, so render the attribute
					x = attArray[1].find("[")
					if x > -1:
						z = len(attArray[1]) - 1 # was "len(attArray[1] - 1)": misplaced parenthesis
						arrLen = int(attArray[1][x + 1:z])
						v = []
						sVal = val.split(" ")
						try:
							if "integer" in attArray[1] or "int" in attArray[1]:
								for vIdx in range(arrLen):
									v.append(int(sVal[vIdx]))
							elif "string" in attArray[1]:
								for vIdx in range(arrLen):
									v.append(sVal[vIdx])
							elif "float" in attArray[1]: # was "arrArray": typo
								for vIdx in range(arrLen): # loop was missing here
									v.append(float(sVal[vIdx]))
							val = v
							render = True
						except:
							render = False
					else:
						try:
							if attArray[1] == "integer" or attArray[1] == "int":
								val = [int(val)]
								render = True
							elif attArray[1] == "string":
								val = [str(val)]
								render = True
							elif attArray[1] == "float":
								val = [float(val)]
								render = True
							else:
								render = False
						except:
							render = False
				if render:
					print "arrayVal: ", attArray[0], "arrayVal", attArray[1], " arrayVal: ", attArray[2], " Value:", val
					if attArray[0] == "" or attArray[0] == None or attArray[2] == "" or val == "" or val == None:
						print "Bad things! Attribute didn't work!"
					else:
						try:
							ri.RiAttribute(attArray[0], attArray[2], val)
						except:
							print "Attribute parse error!"
							# was concatenating val[0] directly (TypeError for non-strings)
							attOut = "Attribute " + '"' + attArray[0] + '" "' + attArray[2] + '" "' + str(val[0]) + '"'
							print attOut
							# ri._ribout.write(attOut)
	def temp(self):
		# NOTE: dead/unfinished code: it is never called, and it references names
		# (rproperties, property, propArray) that are not defined in this scope.
		if len(self.rendererAtts) > 0:
			for att in self.rendererAtts:
				# for each attribute, get the value and render it
				attArray = att.split(":") # splits the option by : chars
				# propArray[0] = category
				# propArray[1] = type
				# propArray[2] = name
				render = True
				# get the attribute value
				val = self.getProperty(property) # get the value
				print val
				print rproperties[property]
				if val != rproperties[property][2]:
					x = propArray[1].find("[")
					if x > -1:
						# array value, e.g. "integer[2]"
						print propArray
						z = len(propArray[1]) - 1
						arrLen = int(propArray[1][x + 1:z])
						v = []
						sVal = val.split(" ")
						if "integer" in propArray[1] or "int" in propArray[1]:
							for vIdx in range(arrLen):
								v.append(int(sVal[vIdx]))
						elif "string" in propArray[1]:
							for vIdx in range(arrLen):
								v.append(sVal[vIdx])
						elif "float" in propArray[1]:
							for vIdx in range(arrLen):
								v.append(float(sVal[vIdx]))
						val = v
					else:
						# not an array, let's do this differently
						if propArray[1] == "integer" or propArray[1] == "int":
							val = [int(val)]
						elif propArray[1] == "float":
							val = [float(val)]
						elif propArray[1] == "string":
							val = [str(val)]
						else:
							render = False
				# here, val should be either an array or not; thus, eval.
				# types and arrays should be ok now; thus, the option call.
				if render:
					ri.RiOption(propArray[0], propArray[1] + " " + propArray[2], val)
	def genChecksum(self):
		pass
	# convenience method to return the editor object for this adapter
	def getProperty(self, property):
		if self.properties.has_key(property):
			return self.properties[property].getValue()
		else:
			return False
	def setProperty(self, property, value):
		if self.properties.has_key(property):
			self.properties[property].setValue(value)
	def getEditor(self):
		return self.objEditor
	def 
getInfo(self): """ Return the object data for this object. """ #objData = self.scene.object_data[self.objectName.getValue()] for key in self.objData.keys(): print "Key: ", key, " value: ", self.objData[key] def callFactory(self, func, *args, **kws): """ construct a curried function for use with a button """ def curried(*moreargs, **morekws): kws.update(morekws) # the new kws override any keywords in the original # print kws, " ", args return func(*(args + moreargs), **kws) return curried def render(self, shadowPass = False): """ Generate Renderman data from this call. """ # decide here what to do with the object's data. Do I render it using the normal adapter method, or do I do something special for cases involving ReadArchive and what-not? return True def renderAsCamera(self): return True def renderArchive(self): # this should be all that's neccessary # I do need to handle dupliverts situations and animated curves, array modifer, all that crap. if self.__dict__.has_key("archive"): ri.RiBegin(archive) self.render() ri.RiEnd() def initObjectData(self): """ Generate the object data for this object. """ self.objData = {} self.objData["name"] = self.object.getName() self.objData["type"] = self.object.getType() def doCameraTransform(self, axis = None): if axis != None: # I still need to transform based on the light's matrix # get the inverse matrix first # if this is point light, I should probably set the rotation values to zero # step 1, transform the world to left-handed ri.RiScale(-1, 1, 1) ri.RiRotate(180, 0, 1, 0) if axis == "px": ri.RiRotate(90, 0, 1, 0) elif axis == "nx": #ri.RiRotate(-90, 1, 0, 0) ri.RiRotate(-90, 0, 1, 0) elif axis == "py": ri.RiRotate(-90, 1, 0, 0) elif axis == "ny": ri.RiRotate(90, 1, 0, 0) elif axis == "pz": ri.RiRotate(180, 0, 1, 0) cmatrix = self.object.getInverseMatrix() #sMat = Blender.Mathutils.ScaleMatrix(-1, 4, vecX) #rMat = Blender.Mathutils.RotationMatrix(180, 4, "y") #mat = cmatrix * sMat * rMat trans = cmatrix.translationPart() #print "\n" #print "At shadowmap generation for axis:", axis #print "Light translation is", trans #print "\n" ri.RiTranslate(trans) def getRenderDirections(self): return ["px", "py", "pz", "nx", "ny", "nz"] def populateShaderParamsList(self, params, shader, initialized): convtypes = {"float":"double", "str":"string", "vec3":"vec3", "matrix":"mat4"} val = "" # print dir(shader) for key in params.keys(): param = params[key] if isinstance(param, list): if len(param) == 3: # color, vector, normal, or point ptype = "vec3" val = cgkit.cgtypes.vec3(float(param[0]), float(param[1]), float(param[2])) elif len(param) == 16: # matrix ptype = "matrix" val = cgkit.cgtypes.mat4(float(param[0]), float(param[2]), float(param[2]), float(param[3]), float(param[4]), float(param[5]), float(param[6]), float(param[7]), float(param[8]), float(param[9]), float(param[10]), float(param[11]), float(param[12]), float(param[13]), float(param[14]), float(param[15])) elif isinstance(param, float) or isinstance(param, int): ptype = "float" val = float(param) elif isinstance(param, str): ptype = "str" val = param if not initialized: shader.declare(key,type=convtypes[ptype], default=val ) # declare the shader here! # shader.createSlot(key + "_slot", convtypes[ptype], None, val) # Here we set the default value to the parameters incoming value... 
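			# --- illustrative sketch (editor's note, not original BtoR code) ---
			# The declare()/setattr() pair here is the cgkit RMShader round trip used
			# throughout this file: declare() creates the shader parameter slot, and
			# setattr() pushes the concrete value into it. Assuming cgkit is importable:
			#
			#   shader = cgkit.rmshader.RMShader()
			#   shader.declare("Kd", type="double", default=0.5)
			#   setattr(shader, "Kd", 0.8)  # read back later via getattr(shader, "Kd")
			#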
# print "Setting shader parameter ", key, " to ", val # and set the value # print key, ", ", val if param != None: setattr(shader, key, val) def populateShaderParams(self, parms, shader, initialized): for parm in parms: convtypes = {"float":"double", "string":"string", "color":"vec3", "point":"vec3", "vector":"vec3", "normal":"vec3", "matrix":"mat4"} p_type = parm.getAttribute("type") p_name = parm.getAttribute("name") if p_type == "float": parm_value = float(parm.getAttribute("value")) elif p_type == "string": parm_value = parm.getAttribute("value") elif p_type == "color": parm_value = cgkit.cgtypes.vec3(float(parm.getAttribute("red")), float(parm.getAttribute("green")), float(parm.getAttribute("blue"))) elif p_type in ["normal", "vector", "point"]: parm_value = cgkit.cgtypes.vec3(float(parm.getAttribute("x")), float(parm.getAttribute("y")), float(parm.getAttribute("z"))) elif p_type == "matrix": mat_value = [] sep = "_" index = 0 for x in range(4): for y in range(4): mat_value.append(float(parmNode.getAttribute(sep.join(["value", index])))) index = index + 1 parm_value = cgkit.cgtypes.mat4(mat_value[0], mat_value[1], mat_value[2], mat_value[3], mat_value[4], mat_value[5], mat_value[6], mat_value[7], mat_value[8], mat_value[9], mat_value[10], mat_value[11], mat_value[12], mat_value[13], mat_value[14], mat_value[15]) else: print "No type found, this parameter was a ", p_type parm_value = None if initialized == False: print "Declaring a variable!" shader.declare(p_name, type=convtypes[p_type], default=parm_value) # shader.createSlot(p_name, convtypes[p_type], None, parm_value) # Here we set the default value to the parameters incoming value. # and set the value if parm_value != None: setattr(shader, p_name, parm_value) def checkReset(self): # check the object in question pass # do nothing unless overridden by a subclass interested in object changes def saveData(self, xml): """ Generate an XML representation of this object """ objXml = xml.createElement("Object") #print self.objData for key in self.objData.keys(): if key not in ["lightcolor", "shaderparms", "shaderparams"]: # ignore light colors & shader parameters here. Shader parms are rebuilt from the shader object itself. if isinstance(self.objData[key], int) or isinstance(self.objData[key], float): objXml.setAttribute(key, '%f' % self.objData[key]) else: objXml.setAttribute(key, self.objData[key]) if len(self.properties) > 0: # I've got at least one property for property in self.properties: if self.properties[property].saveable: xmlProp = self.properties[property].toXML(xml) xmlProp.setAttribute("name", property) # set the name attribute here for the moment, but later configure a property to have a name AND a title! objXml.appendChild(xmlProp) if self.__dict__.has_key("shader"): if self.shader.getShaderName() not in ["None Selected", None]: # if I need to add other extra processing, I simply add another case. shaderNode = xml.createElement("shader") # update all the stuff... self.shader.updateShaderParams() shaderNode.setAttribute("name", self.shader.getShaderName()) if self.shader.shader.filename != None: shaderNode.setAttribute("path", os.path.normpath(self.shader.shader.filename)) else: shaderNode.setAttribute("path", "None") print "Shader Parameters: ", self.shader.shader.shaderparams for parm in self.shader.shader.shaderparams: # get the param and stuff into my dict # create the node parmNode = xml.createElement("Param") value = getattr(self.shader.shader, parm) # create an XML element for this value. 
					s_type = self.shader.shader.shaderparams[parm].split()[1]
					# setup as much of the node element as I can here
					parmNode.setAttribute("name", parm)
					parmNode.setAttribute("type", s_type)
					if s_type == "float":
						parmNode.setAttribute("value", '%f' % value)
					elif s_type == "string":
						parmNode.setAttribute("value", value)
					elif s_type == "color":
						parmNode.setAttribute("red", '%f' % value[0])
						parmNode.setAttribute("green", '%f' % value[1])
						parmNode.setAttribute("blue", '%f' % value[2])
					elif s_type in ["point", "normal", "vector"]:
						parmNode.setAttribute("x", '%f' % value[0])
						parmNode.setAttribute("y", '%f' % value[1])
						parmNode.setAttribute("z", '%f' % value[2])
					elif s_type == "matrix": # was "s_stype": typo, NameError for matrix params
						sep = "_"
						index = 0
						for x in range(4):
							for y in range(4):
								# join() needs strings, and setAttribute needs a string value
								parmNode.setAttribute(sep.join(["value", str(index)]), '%f' % value[x, y])
								index = index + 1
					# now commit this node to the shader node
					shaderNode.appendChild(parmNode)
				objXml.appendChild(shaderNode)
		return objXml
	def loadData(self, xml):
		""" Recreate this object from an XML representation of it. """
		xmlTypes = { "int" : int, "str" : str, "list" : list, "dict" : dict, "float" : float, "bool" : bool }
		self.objData = {}
		atts = xml.attributes
		# setup the attributes first, since I will need the shader filename if I'm using slparams
		for att in range(atts.length):
			xAtt = atts.item(att)
			self.objData[xAtt.name] = xAtt.value.encode("ascii")
		xmlProperties = xml.getElementsByTagName("property")
		# what I should really do is move the properties from the object's UI to the
		# object adapter, since that's really where I control the data.
		for xmlProperty in xmlProperties:
			# each of these is a property element
			propertyName = xmlProperty.getAttribute("name").encode("ascii")
			xmlType = xmlProperty.getAttribute("type").encode("ascii")
			xmlValue = xmlProperty.getAttribute("value").encode("ascii")
			if xmlType == "bool":
				propertyValue = xmlTypes[xmlType](eval(xmlValue))
			elif xmlType == "list":
				# rebuild a pretty color!
				propertyValue = [float(eval(xmlProperty.getAttribute("red").encode("ascii"))), float(eval(xmlProperty.getAttribute("green").encode("ascii"))), float(eval(xmlProperty.getAttribute("blue").encode("ascii")))]
				# print propertyValue
			else:
				propertyValue = xmlTypes[xmlType](xmlValue)
			# print "Value for property ", propertyName, " is ", propertyValue
			self.editors[propertyName].setValue(propertyValue)
		# setup the properties for this object
		self.initMaterial()
		# test for any shaders
		parms = xml.getElementsByTagName("shader")
		if len(parms) > 0:
			# I've got some shader or another:
			# strip the params out and stuff them into the "shaderparams" key
			self.initShader(useXML = True, xml = parms)
		self.checkReset()
	def initShader(self, useXML = False, xml = None):
		try:
			if self.settings.use_slparams:
				# try to find the shader file in question. In truth, the default files should
				# exist. Get the first shader search path for pre-generated shaders.
				shaderPath = self.settings.getShaderSearchPaths()[0]
				initialized = True
				shader = cgkit.rmshader.RMShader(self.objData["shaderfilename"])
				if useXML:
					self.populateShaderParams(xml, shader, initialized)
				else:
					if self.objData.has_key("shaderparms"):
						self.populateShaderParamsList(self.objData["shaderparms"], shader, initialized)
				self.shader = btor.BtoRMain.GenericShader(shader, self.shader_type, self)
			else:
				initialized = False
				shader = cgkit.rmshader.RMShader() # blank shader here
				# since I'm not relying upon slparams to build the light shader params for me, I must instead do something like
				# file = os.path.basename(self.objData["shaderfilename"])
				# so here, let's see if the light shader I want is actually in the first shader path..that should be in the filename
				# path = self.settings.getPathForShader(self.objData["shaderfilename"], self.shader_type) # here I should have a path
				# if path == None:
				#	self.evt_manager.showErrorDialog("Missing shader or shader path!", "Error: The default " + self.shader_type + " shader(s) could not be found! Check your setup.")
				#	raise ValueError
				# else:
				self.shader = btor.BtoRMain.GenericShader(None, self.shader_type, self)
				# if self.shader.searchPaths.getValue() != path:
				#	self.shader.setSearchPath(path) # set the path to the path where my light shader is. Should be the *first* path listed, if not, move on
				# hopefully this doesn't break stuff.
				# else: # with luck, this is always the case
				path = os.path.split(self.objData["shaderfilename"])
				self.shader.setSearchPath(path[0])
				self.shader.setShader(path[1]) # this should update the controls
				# self.shader.shader.shadername = self.objData["shaderparms"]["shadername"]
				if self.shader.shader != None:
					self.shader.shader.filename = self.objData["shaderfilename"]
					self.populateShaderParamsList(self.objData["shaderparms"], self.shader.shader, True) # so I can now setup the parameters
		except:
			traceback.print_exc()
			self.shader = btor.BtoRMain.GenericShader(None, self.shader_type, self)
		self.objEditor.setShader(self.shader)
		# self.objEditor.shaderButton.setTitle(self.shader.getShaderName())
	def setMaterial(self, material, preInit = False):
		if material != None:
			self.material = material
			if preInit == False:
				self.objData["material"] = material.material.name
			self.editors["material"].setMaterial(material)
	def initMaterial(self):
		if self.objData.has_key("material"):
			# print self.objData["material"], " was found"
			if self.objData["material"] != None and self.objData["material"] != "None":
				self.editors["material"].setMaterial(self.materials.getMaterial(self.objData["material"]))
	def renderMaterial(self):
		Bmaterial = self.materials.getMaterial(self.getProperty("material"))
		if Bmaterial == "None" or Bmaterial == None:
			# generate a default material - make sure to setup a default material button in the scene settings dialog
			print "rendering matte material"
			ri.RiColor(cgkit.cgtypes.vec3(1.0, 1.0, 1.0))
			ri.RiOpacity(cgkit.cgtypes.vec3(1.0, 1.0, 1.0))
			ri.RiSurface("matte", { "Ka" : 1.0, "Kd" : 1.0 })
		else:
			if Bmaterial != None:
				# test for a transform
				translated = False
				rotated = False
				scaled = False
				transform = False
				translation = Bmaterial.getProperty("Translation")
				rotation = Bmaterial.getProperty("Rotation").obj
				scale = Bmaterial.getProperty("Scale")
				print "material transform info is:"
				print translation
				print rotation
				print scale
				# was "translation[1] <> 0.0" twice: the Z component was never checked
				if translation[0] != 0.0 or translation[1] != 0.0 or translation[2] != 0.0:
					translated = True
					transform = True
				if rotation[1] or rotation[2] or rotation[3]:
					rotated = True
					transform = True
				if scale[0] != 0.0 or scale[1] != 0.0 or scale[2] != 0.0:
					scaled = True
					transform = True
				if transform:
					ri.RiTransformBegin()
				if translated:
					ri.RiTranslate(translation)
				if rotated:
					# here I have to figure out the axis of rotation.
					x, y, z = 0, 0, 0
					if rotation[1]:
						x = 1
					if rotation[2]:
						y = 1
					if rotation[3]:
						z = 1
					ri.RiRotate(rotation[0], x, y, z)
				if scaled:
					print "material scale is ", scale
					ri.RiScale(scale[0], scale[1], scale[2])
				material = Bmaterial.material
				Bmaterial.getProperty("Surface").getObject().updateShaderParams()
				Bmaterial.getProperty("Displacement").getObject().updateShaderParams()
				Bmaterial.getProperty("Volume").getObject().updateShaderParams()
				ri.RiColor(Bmaterial.getProperty("color"))
				ri.RiOpacity(Bmaterial.getProperty("opacity"))
				# thus
				if material.surfaceShaderName() != None:
					ri.RiSurface(material.surfaceShaderName(), material.surfaceShaderParams(0))
				if material.displacementShaderName() != None:
					ri.RiDisplacement(material.displacementShaderName(), material.displacementShaderParams(0))
				if material.interiorShaderName() != None:
					ri.RiAtmosphere(material.interiorShaderName(), material.interiorShaderParams(0))
				if transform:
					ri.RiTransformEnd()

class MeshAdapter(ObjectAdapter):
	""" BtoR mesh Adapter """
	protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRMesh])
	def __init__(self, object):
		""" Initialize a mesh export adapter """
		ObjectAdapter.__init__(self, object)
	def render(self, shadowPass = False, envPass = False, renderLayers = []):
		render = False
		renderLayer = False
		illumList = []
		# test for layer visibility first
		if not self.scene.getProperty("RenderLayers"):
			for layer in self.object.layers:
				if layer in self.scene.layers:
					# object exists in at least one layer that's enabled
					renderLayer = True
					break
		else:
			renderLayer = True
		if renderLayer:
			if shadowPass:
				if self.properties["RenderInShadowPass"].getValue():
					if self.lighting.getProperty("layerLighting"):
						# if I'm using layerLighting, then shadowPass rendering is only
						# neccessary if any lights are illuminating this object.
						for layer in self.object.layers:
							if layer in self.scene.currentLightLayers:
								render = True
								break # first one wins
					else:
						# if I'm not using layerLighting, then render always if RenderInShadowPass is true
						render = True
			elif envPass:
				if self.properties["RenderInEnvMaps"].getValue():
					render = True
			else:
				if self.lighting.getProperty("layerLighting"):
					# if I'm using layerLighting, then I know that all of my lights are going
					# to be OFF by default. I need to turn some lights on and off here.
					for layer in self.object.layers:
						for light in self.scene.lightingLayers[layer]:
							if light not in illumList:
								illumList.append(light)
				render = True
		if render:
			if not shadowPass:
				if len(illumList) > 0:
					ri.RiAttributeBegin() # push the graphics state. All lights should be off
					for light in illumList:
						ri.RiIlluminate(light, ri.RI_TRUE)
			# immediately test for dupliverts
			if self.object.enableDupVerts and len(self.object.DupObjects) > 0:
				# I might have dupliverts, react accordingly
				# test for rotation by object normal, etc.
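				# --- illustrative sketch (editor's note, not original BtoR code) ---
				# The dupli-vert export below uses the classic RI retained-object pattern:
				# define the geometry once, then stamp out transformed instances of it.
				#
				#   handle = ri.RiObjectBegin()        # returns a sequence handle
				#   # ...emit geometry at the origin...
				#   ri.RiObjectEnd()
				#   for matrix in instance_matrices:   # hypothetical per-copy transforms
				#       ri.RiTransformBegin()
				#       ri.RiTransform(matrix)
				#       ri.RiObjectInstance(handle)
				#       ri.RiTransformEnd()
				#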
# step 1, create an object definition objSequence = ri.RiObjectBegin() # keep the sequence number for referencing the instances # don't render materials here, only the object data, and for safety's sake, keep at the world origin self.renderMeshData() ri.RiObjectEnd() # now for the instances objs = self.object.DupObjects ri.RiAttributeBegin() # I do this out here, because the material attribute will remain the same for all instances # unless of course I come up with a way of specifying a different material per instance...which would be really interesting. self.renderMaterial() # render the material instance = 1 for obj in objs: ri.RiTransformBegin() # transform the object ri.RiAttributeBegin() ri.RiAttribute("identifier", "name", self.objData["name"] + instance) if not shadowPass: self.renderAttributes() ri.RiTransform(obj[1]) # that should be the duplicated matrix and in fact should take into account rotations and scaling, all that ri.RiObjectInstance(objSequence) ri.RiAttributeEnd() ri.RiTransformEnd() instance = instance + 1 ri.RiAttributeEnd() else: ri.RiAttributeBegin() ri.RiAttribute("identifier", "name", [self.objData["name"]]) # give it a name if not shadowPass: self.renderAttributes() ri.RiAttribute("displacementbound", "sphere", self.getProperty("DispBound")) srate = self.getProperty("ShadingRate") sceneRate = self.scene.getProperty("ShadingRate") if srate != sceneRate: ri.RiShadingRate(srate) ri.RiTransformBegin() if shadowPass and self.getProperty("ShadowMats"): self.renderMaterial() elif not shadowPass and not self.getProperty("Matte"): self.renderMaterial() ri.RiTransform(self.object.matrix) # transform # self.renderMeshData() # and render the mesh archiveFile = self.objData["archive"] + self.objData["name"] + ".rib" if self.settings.renderer == "BMRT": archiveFile = archiveFile.replace("\\", "\\\\") ri.RiReadArchive(archiveFile) # this should read from the archive path ri.RiTransformEnd() ri.RiAttributeEnd() # aaaannnnnd done. # I rendered this object, so flag the self.changed = False if len(illumList) > 0: ri.RiAttributeEnd() # pop the graphics state back out def renderMeshData(self): # check first for special case rendering options if self.getProperty("FacePatches"): self.renderFacePatches() else: subsurf = False modifiers = self.object.modifiers for modifier in modifiers: if modifier.type == Blender.Modifier.Type["SUBSURF"]: subsurf = True else: subsurf = False if subsurf: # print "Exporting a subdivision mesh" self.renderSubdivMesh() else: # print "Exporting a standard mesh" self.renderPointsPolygons() def renderArchive(self): """ Write this object to an external archive file. """ # add support here for detecting if materials should be exported to archives or not. # I should probably add that as an option in the export settings dialog. ri.RiBegin(self.objData["archive"] + self.objData["name"] + ".rib") # this should be ok # this is pure geometry here ri.RiAttributeBegin() ri.RiTransformBegin() self.renderMeshData() ri.RiTransformEnd() ri.RiAttributeEnd() ri.RiEnd() def initObjectData(self): """ Initialize the object data for this object. """ # do some mesh-related stuff # all I'm concerned with at the moment is whether or not the mesh in question is subsurfed or not. I don't care about levels and what-not, # since I can grab that from the blender object itself. 
self.objData = {} self.objData["name"] = self.object.getName() self.objData["output_type"] = "Mesh" self.objData["type"] = self.object.getType() meshObject = self.object.getData() modifiers = self.object.modifiers subsurf = False for modifier in modifiers: # hold up: This is a render-time issue, doesn't belong here. if modifier.type == "Subsurf": # and modifier[Blender.Modifier.Settings.TYPES] == 0: subsurf = True # modified = True renderLevels = modifier[Blender.Modifier.Settings.RENDLEVELS] UV = modifier[Blender.Modifier.Settings.UV] # hold up: This is a render-time issue, doesn't belong here. # modifier[Blender.Modifier.Settings.TYPES] = 1 else: subsurf = False if subsurf == True: self.objData["mesh_type"] = "Subsurf" self.objData["renderLevels"] = renderLevels self.objData["UV"] = UV else: self.objData["mesh_type"] = "mesh" self.objData["material"] = "None" def setGroup(self, group): self.objData["group"] = group self.objEditor.objectGroupButton.setTitle(group) def renderPointsPolygons(self): """ Export Renderman PointsPolygons object. """ params = {} mesh = self.object.getData(False, True) points = [] normals = [] fvNormals = [] faceVarying_N = False autosmooth = False for v in mesh.verts: points.append(v.co) #if autosmooth: # normals.append(vert[4]) #else: # setup for faceVarying normals # if mesh.faces[idx].smooth == 1: # if 1 == 2: # faceVarying_N = True # fvNormals.append(vert[4]) # append the normal # else: # fvNormals.append(mesh.faces[idx].no) # pass normals.append(v.no) # print v.index Cs = range(len(mesh.verts)) # params = {"P":points} nfaces = len(mesh.faces) # print nfaces, " faces found." nverts = [] vertids = [] st = range(len(mesh.verts)) for face in mesh.faces: nverts.append(len(face.v)) if mesh.vertexColors == 1: if len(face.v) > 2: for vertIdx in range(len(face.v)): Cs[face.v[vertIdx].index] = face.col[vertIdx] # should actually average the vert color across if mesh.faceUV == 1: if len(face.v) > 2: for vertIdx in range(len(face.v)): st[face.v[vertIdx].index] = [face.uv[vertIdx][0], 1.0 - face.uv[vertIdx][1]] #else: #if len(mesh.faces[0].uv) != 0: # vtuv = [] # for vertIdx in range(len(face.v)): # uv = face.uv[vertIdx] # uv = uv[0], 1.0 - uv[1] # vertTexUV[face.v[vertIdx].index] = uv if mesh.vertexUV: pass fVerts = [] for v in face.v: vertids.append(v.index) fVerts.append(v.index) # print "Face verts: ", fVerts if mesh.faceUV == 1 and self.getProperty("ExportSt"): ri.RiDeclare("st", "facevarying float[2]") # declare ST just in case params["st"] = st if mesh.vertexColors == 1 and self.getProperty("ExportCS"): vCol = [] for vertCol in Cs: vCol.append([vertCol.r / 256.0, vertCol.g / 256.0, vertCol.b / 256.0]) params["Cs"] = vCol # handle normals issues if faceVarying_N: ri.RiDeclare("N", "facevarying normal") # declare N to be facevarying params["N"] = normals elif autosmooth: params["N"] = normals params = {"P":points } if mesh.faceUV == 1: #print st params["st"] = st elif mesh.vertexColors == 1: vCol = [] for vertCol in Cs: vCol.append([vertCol.r / 256.0, vertCol.g / 256.0, vertCol.b / 256.0]) #print Cs params["Cs"] = vCol else: #print vertTexUV # params["st"] = vertTexUV pass #print nfaces #print nverts #print vertids #print params ri.RiPointsGeneralPolygons(nfaces*[1], nverts, vertids, params) def xrenderSubdivMesh(self): """ render a subdivision mesh This version reorders everything (faces, verts) in the mesh so it's sorted by z, x, y """ getCs = False getSt = False mesh = self.object.getData(False, True) vList = range(len(mesh.verts)) # vertex list # vertsByIndex = 
range(len(mesh.verts)) facesByZ = range(len(mesh.faces)) zVerts = [] if mesh.vertexColors == 1: getCs = True if mesh.faceUV == 1: getSt = True # create a list of faces sorted by least z,x,y by using the least z,x,y vert of the face as the sorting key for face in mesh.faces: # build an array of z,x,y-least verts from this face, then use the least of that as an index to this face. zList = [] for vert in face.verts: zList.append((vert.co[2], vert.co[0], vert.co[1], vert.index, face.index)) # ok, both vertex and face indexes vList[vert.index] = [vert.co[2], vert.co[0], vert.co[1], vert.index] zList.sort() zVerts.append(zList[0]) zVerts.sort() # this is now the list of *faces* by Z vList.sort() # this is the list of *verts* sorted by Z points = [] crossRef = range(len(mesh.verts)) index = 0 # create the cross reference list that indexes: # new index (index) vs. blender index (vert[4]]) # so that crossRef[index] results in original vert.index # face vert ID access is thus # fVertIdx = crossRef[face.vert[index].index] for vert in vList: crossRef[vert[3]] = index # cross reference item index = index + 1 points.append([vert[1], vert[2], vert[0]]) # append to the POINTS array # now generate a list of vertex IDs for each face by iterating zVerts # retrieving the face index, getting the face verts, and cross-referencing them with the # new z,x,y sorted verts in points. vertIDs = [] nVerts = [] normals = [] fvNormals = [] uv = [] autosmooth = False faceVarying_N = False Cs = range(len(mesh.verts)) st = range(len(mesh.verts)) print facesByZ for zVert in zVerts: print "Zvert is:", zVert idx = zVert[4] face = mesh.faces[idx] vertList = [] for vert in face.verts: vertList.append((vert.co[2], vert.co[0], vert.co[1], vert.index, vert.no)) nVerts.append(len(vertList)) # number of verts vertList.sort() # sort the verts here for vert in vertList: # step one, point data vertIDs.append(crossRef[vert[3]]) # vertex id for this vert if autosmooth: normals.append(vert[4]) else: # setup for faceVarying normals if mesh.faces[idx].smooth == 1: faceVarying_N = True fvNormals.append(vert[4]) # append the normal else: fvNormals.append(mesh.faces[idx].no) if getCs: Cs[crossRef[vert[3]]] = face.col[vert[3]] if getSt: # here is an issue # Do I want to test to see if this value exists already? or not? I know that I've already got a range here, so st[crossRef[vert[3]]] = [face.uv[vert[3]][0],1.0 - face.uv[vert[3]][1]] creases = {} # develop a list of creases based on crease value. for edge in mesh.edges: if edge.crease > 0: if edge.crease not in creases: creases[edge.crease] = [] creases[edge.crease].append([crossRef[edge.v1.index], crossRef[edge.v2.index]]) creaselist = [] for crease in creases: # for each crease group, create a set of vertices and merge verts = [] i_set = Set() setlist = [] edgelist = creases[crease] for edge in edgelist: i_set.add(edge[0]) i_set.add(edge[1]) for item in i_set: set = Set() set.add(item) setlist.append(set) for edge in edgelist: seta = self.find_set(edge[0], setlist) if edge[1] not in seta: setb = self.find_set(edge[1], setlist) newset = self.merge_set(seta, setb) setlist.remove(seta) setlist.remove(setb) setlist.append(newset) # print "Creases for crease level: ", crease, " are ", setlist for item in setlist: creaselist.append([crease, item]) # this will add to the flat list of crease objects that I need. 
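		# --- illustrative sketch (editor's note, not original BtoR code) ---
		# RiSubdivisionMesh encodes tags as four parallel arrays: for each tag,
		# nargs holds (int count, float count), with the payloads appended to
		# intargs/floatargs. One crease over verts 3-4-5 with sharpness 2.0:
		#
		#   tags      = ["crease"]
		#   nargs     = [3, 1]      # 3 vertex ids, 1 sharpness float
		#   intargs   = [3, 4, 5]
		#   floatargs = [2.0]
		#
		# The loop below builds exactly this layout from the merged crease sets.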
tags = [] nargs = [] intargs = [] floatargs = [] for crease in creaselist: if type(crease) != type(1): tags.append("crease") nargs.append(len(crease[1])) nargs.append(1) for item in crease[1]: intargs.append(item) val = (float(crease[0]) / 255) * 5.0 floatargs.append(val) # normalized currently for the Aqsis renderer # I should make this a property if self.getProperty("interpolateBoundary"): tags.append("interpolateboundary") nargs.append(0) nargs.append(0) params = {"P":points } if autosmooth or faceVarying_N: params["N"] = normals if mesh.faceUV == 1 and self.getProperty("ExportSt"): ri.RiDeclare("st", "facevarying float[2]") # declare ST just in case params["st"] = st if mesh.vertexColors == 1 and self.getProperty("ExportCS"): vCol = [] for vertCol in Cs: vCol.append([vertCol.r / 256.0, vertCol.g / 256.0, vertCol.b / 256.0]) params["Cs"] = vCol # handle normals issues if faceVarying_N: ri.RiDeclare("N", "facevarying normal") # declare N to be facevarying ri.RiSubdivisionMesh("catmull-clark", nVerts, vertIDs, tags, nargs, intargs, floatargs, params) def sortFaceVerts(self, face): v_list = [] for vert in face.verts: v_list = (vert.co[2], vert.co[1], vert.co[0], vert.index) v_list.sort() return v_list def obfuscate(self): # don't forget reference geometry. I need the base mesh before lattice/armature transforms are applied to it. # I can probably disable all modifiers except for decimate and gather that mesh, # then turn them all back on (excepting subsurf) and gather *that* mesh # then I do # params["PRef"] = refPoints # for all the other stuff, I also need # params["Cs"] = vertColors # per vertex colors # params["Cs"] = vertCoors # or Face UV colors # params["FModes"] = faceModes # face display modes to pass to custom shaders pass def renderSubdivMesh(self): """ Export Subdivision mesh. """ mesh = self.object.getData(False, True) modifiers = self.object.modifiers points = [] normals = [] uv = [] faceModes = [] # get the verts by ID vertTexUV = [] for vert in mesh.verts: points.append(vert.co) normals.append(vert.no) if mesh.faceUV == 0: vertTexUV.append(0) nfaces = len(mesh.faces) nverts = [] vertids = [] Cs = range(len(mesh.verts)) st = range(len(mesh.verts)) # get the faces by vertex ID getCs = False getSt = False if mesh.vertexColors == 1: getCs = True if mesh.faceUV == 1: getSt = True edge_faces = self.edge_face_users(mesh) for face in mesh.faces: # print face.index nverts.append(len(face.v)) vtuv = [] if len(face.v) > 2: for vertIdx in range(len(face.v)): if getCs: Cs[face.v[vertIdx].index] = face.col[vertIdx] if getSt: # here is an issue # Do I want to test to see if this value exists already? or not? I know that I've already got a range here, so if st[face.v[vertIdx].index] == face.v[vertIdx].index: st[face.v[vertIdx].index] = [face.uv[vertIdx][0],1.0 - face.uv[vertIdx][1]] for vert in face.v: vertids.append(vert.index) if 1 == 2: # this has to be calculated per edge, so what I probably want to do first is build up a list of face index per edge # get the normals of each face in the edge_faces list creases = {} creaselist = range(len(mesh.edges)) edgeIdx = 0 maxAng = self.getProperty("MaxCreaseAngle") for edge in edge_faces: if len(edge) > 1: a = edge[0] b = edge[1] ang = Blender.Mathutils.AngleBetweenVecs(a.no, b.no) # my angle range is -90 to 90 degrees # thus if ang < 0: ang = -ang if ang > maxAng: factor = 1.0 else: # calculate the value for the crease using 0-1.0 range, as applied to the range 0-maxAng. 
factor = ang / maxAng creaselist[edgeIdx] = [factor, [mesh.edges[edgeIdx].v1.index, mesh.edges[edgeIdx].v2.index]] # this is 1:1 because of how I'm doing this edgeIdx = edgeIdx + 1 else: # get the creases creases = {} # develop a list of creases based on crease value. for edge in mesh.edges: if edge.crease > 0: if edge.crease not in creases: creases[edge.crease] = [] creases[edge.crease].append([edge.v1.index, edge.v2.index]) creaselist = [] for crease in creases: # for each crease group, create a set of vertices and merge verts = [] i_set = Set() setlist = [] edgelist = creases[crease] for edge in edgelist: i_set.add(edge[0]) i_set.add(edge[1]) for item in i_set: set = Set() set.add(item) setlist.append(set) for edge in edgelist: seta = self.find_set(edge[0], setlist) if edge[1] not in seta: setb = self.find_set(edge[1], setlist) newset = self.merge_set(seta, setb) setlist.remove(seta) setlist.remove(setb) setlist.append(newset) # print "Creases for crease level: ", crease, " are ", setlist for item in setlist: creaselist.append([crease, item]) # this will add to the flat list of crease objects that I need. tags = [] nargs = [] intargs = [] floatargs = [] for crease in creaselist: if type(crease) != type(1): tags.append("crease") nargs.append(len(crease[1])) nargs.append(1) for item in crease[1]: intargs.append(item) val = (float(crease[0]) / 255) * 5.0 floatargs.append(val) # normalized currently for the Aqsis renderer tags.append("interpolateboundary") nargs.append(0) nargs.append(0) params = {"P":points, "N":normals} if mesh.faceUV == 1 and self.getProperty("ExportSt"): params["st"] = st if mesh.vertexColors == 1 and self.getProperty("ExportCS"): vCol = [] for vertCol in Cs: vCol.append([vertCol.r / 256.0, vertCol.g / 256.0, vertCol.b / 256.0]) params["Cs"] = vCol # and why can't I output both? if 1 == 2: print "nfaces: ", nfaces print "nverts: ", nverts print "vertids: ", vertids print "ntags: ", len(tags) print "tags: ", tags print "nargs: ", nargs print "intargs: ", intargs print "floatargs: ", floatargs print "params: ", params # and now to build the call if mesh.faceUV == 1 and self.getProperty("ExportST"): ri.RiDeclare("st", "facevarying float[2]") # declare ST just in case ri.RiSubdivisionMesh("catmull-clark", nverts, vertids, tags, nargs, intargs, floatargs, params) def find_set(self, val, set_list): """ Find a set in a set""" for set in set_list: if val in set: return set def merge_set(self, seta, setb): """ merge two sets """ return seta.union(setb) def renderFacePatches(self): mesh = self.object.getData(False, True) for face in mesh.faces: # for all faces with four verts, replace the face with a patch v = face.v if len(v) == 4: # render a patch ri.RiPatch("bilinear", {"P":[v[0].co, v[1].co, v[3].co, v[2].co] }) # from BpyMesh def sorted_edge_indicies(self, ed): i1= ed.v1.index i2= ed.v2.index if i1>i2: i1,i2= i2,i1 return i1, i2 def edge_face_users(self, me): ''' Takes a mesh and returns a list aligned with the meshes edges. 
Each item is a list of the faces that use the edge would be the equiv for having ed.face_users as a property ''' face_edges_dict= dict([(self.sorted_edge_indicies(ed), (ed.index, [])) for ed in me.edges]) for f in me.faces: fvi= [v.index for v in f.v]# face vert idx's for i in xrange(len(f)): i1= fvi[i] i2= fvi[i-1] if i1>i2: i1,i2= i2,i1 face_edges_dict[i1,i2][1].append(f) face_edges= [None] * len(me.edges) for ed_index, ed_faces in face_edges_dict.itervalues(): face_edges[ed_index]= ed_faces return face_edges def face_edges(self, me): ''' Returns a list alligned to the meshes faces. each item is a list of lists: that is face_edges -> face indicies face_edges[i] -> list referencs local faces v indicies 1,2,3 &| 4 face_edges[i][j] -> list of faces that this edge uses. crap this is tricky to explain :/ ''' face_edges= [ [None] * len(f) for f in me.faces ] face_edges_dict= dict([(self.sorted_edge_indicies(ed), []) for ed in me.edges]) for fidx, f in enumerate(me.faces): fvi= [v.index for v in f.v]# face vert idx's for i in xrange(len(f)): i1= fvi[i] i2= fvi[i-1] if i1>i2: i1,i2= i2,i1 edge_face_users= face_edges_dict[i1,i2] edge_face_users.append(f) face_edges[fidx][i]= edge_face_users return face_edges class LampAdapter(ObjectAdapter): """ BtoR Lamp Adapter object """ protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRLamp]) def __init__(self, object): """ Initialize a Lamp export adapter """ ObjectAdapter.__init__(self, object) self.isAnimated = False self.shadowMaps = {} # indexed by direction_frame method def render(self): """ Generate renderman data for this object. """ # I should call checkReset here to make sure I have the latest/greatest up to date data for this lamp, no? # shader = self.getProperty("shader").getObject().shader self.checkReset() self.shader.updateShaderParams() # I should probably get the light handle here if self.getProperty("transformLight"): ri.RiTransformBegin() ri.RiTranform(self.object.getMatrix()) lightHandle = ri.RiLightSource(self.shader.shader.shadername, self.shader.shader.params()) if self.scene.getProperty("RenderLayers"): allLayers = True else: allLayers = False # I need lightHandle for turning lights on and off for layer support if self.lighting.getProperty("layerLighting"): # # unless this light is flagged as global, turn it off in the current graphics state. if not self.getProperty("globalLight"): ri.RiIlluminate(lightHandle, ri.RI_FALSE) for layer in self.object.layers: if allLayers: self.scene.lightingLayers[layer].append(lightHandle) else: if layer in self.scene.layers: # if the current layer is ON in the global layer list, then illuminate this layer self.scene.lightingLayers[layer].append(lightHandle) if self.getProperty("transformLight"): ri.RiTransformEnd() def generateRSL(self): """ Create RSL for this light. """ pass def getScreenWindow(self, camera): """ Figure out the mapping between screen space and world space for an orthographic transform """ # get the camera's matrix. cMat = camera.getMatrix() # this gives me the camera's transform matrix. # now what I have to do is def genCheckSum(self): # the only settings I care about are # scale/rot/trans, obviously. I can probably get the transform matrix and use that, or make vectors of the three and go from there # then I want the type # the color...energy...'only shadow'...maybe the textures. 
lamp = self.obj.getData() name = self.obj.getName() intensity = "%f" % lamp.intensity loc = "%f_%f_%f" % (self.obj.LocX, self.obj.LocY, self.obj.LocZ) rot = "%f_%f_%f" % (self.obj.RotX, self.obj.RotY, self.obj.RotZ) scale = "%f_%f_%f" % (self.obj.ScaleX, self.obj.ScaleY, self.obj.ScaleZ) energy = "%f" % lamp.getEnergy() r = "%f" % lamp.R g = "%f" % lamp.G b = "%f" % lamp.B # lamp types # 0 - point # 1 - distant/sun # 2 - spot # 3 - hemi # 4 - area seed = name + intensity + loc + rot + scale + r + g + b # and this covers the pointlight if self.obj.getType() in [1, 2]: # I'm interested in spotsi and bias stuff # I probably want to worry about falloff too bias = "%f" % lamp.bias samples = "%f" % lamp.samples spotsize = "%f" % lamp.spotSize spotblend = "%f" % lamp.spotBlend seed = seed + bias + samples + spotsize + spotblend # and this covers spots # now I have to worry about shader parameters being changed, because in the case of non-standard shader params (you know, different light shader), I might need to regenerate the shadow map even if the # light wasn't actually touched in blender. shader = self.getProperty("shader").getObject().shader shaderName = shader.shaderName() # should I worry about the filename too? params = shader.params().__repr__() hash = md5.new(seed) return hash def doCameraTransform(self, axis): if axis != "shadow": # I still need to transform based on the light's matrix # get the inverse matrix first # if this is point light, I should probably set the rotation values to zero ri.RiScale(-1, 1, 1) if axis == "px": ri.RiRotate(180, 0, 1, 0) ri.RiRotate(90, 0, 1, 0) elif axis == "nx": ri.RiRotate(180, 0, 1, 0) ri.RiRotate(-90, 0, 1, 0) elif axis == "py": ri.RiRotate(-90, 1, 0, 0) elif axis == "ny": ri.RiRotate(90, 1, 0, 0) elif axis == "nz": ri.RiRotate(180, 0, 1, 0) ri.RiTranslate(-self.object.LocX, -self.object.LocY, -self.object.LocZ) else: ri.RiScale(-1, 1, 1) #ri.RiRotate(180, 0, 1, 0) cmatrix = self.object.getInverseMatrix() print cmatrix matrix = [[cmatrix[0][0], cmatrix[0][1], -cmatrix[0][2], cmatrix[0][3]], [cmatrix[1][0], cmatrix[1][1], -cmatrix[1][2], cmatrix[1][3]], [cmatrix[2][0], cmatrix[2][1], -cmatrix[2][2], cmatrix[2][3]], [cmatrix[3][0], cmatrix[3][1], -cmatrix[3][2], cmatrix[3][3]]] ri.RiTransform(matrix) self.genChecksum() def getClippingRange(self): light = self.object.getData() return [light.getClipStart(), light.getClipEnd()] def getRenderProjection(self): if self.object.getData().getType() == 0: projection = "perspective" elif self.object.getData().getType() == 1: projection = "orthographic" elif self.object.getData().getType() == 2: projection = "perspective" else: projection = "perspective" return projection def setShadowParms(self, params): shadername = self.shader.shader.shadername sparams = self.shader.shader.shaderparams #if shadername == "shadowpoint": if sparams.has_key("sfpx"): self.shader.setParamValue("sfpx", params["px"]["shadowName"]) self.shader.setParamValue("sfpy", params["py"]["shadowName"]) self.shader.setParamValue("sfpz", params["pz"]["shadowName"]) self.shader.setParamValue("sfnx", params["nx"]["shadowName"]) self.shader.setParamValue("sfny", params["ny"]["shadowName"]) self.shader.setParamValue("sfnz", params["nz"]["shadowName"]) elif sparams.has_key("px"): self.shader.setParamValue("px", params["px"]["shadowName"]) self.shader.setParamValue("py", params["py"]["shadowName"]) self.shader.setParamValue("pz", params["pz"]["shadowName"]) self.shader.setParamValue("nx", params["nx"]["shadowName"]) self.shader.setParamValue("ny", 
params["ny"]["shadowName"]) self.shader.setParamValue("nz", params["nz"]["shadowName"]) elif sparams.has_key("shadow"): if params.has_key("shadowName"): self.shader.setParamValue("shadow", params["shadow"]["shadowName"]) elif sparams.has_key("shadowname"): print params if params.has_key("shadow"): self.shader.setParamValue("shadowname", params["shadow"]["shadowName"]) #elif shadername == "shadowspot" or shadername == "shadowdistant" or shadername == "bml": # setattr(self.shader.shader, "shadowname", params["shadow"]["shadowName"]) # self.shader.setParamValue("shadowname", params["shadow"]["shadowName"]) def getRenderDirections(self): print "Light Type is:", self.object.getData().getType() if self.object.getData().getType() == 0: return ["px", "py", "pz", "nx", "ny", "nz"] elif self.object.getData().getType() == 1: # determine how to bring the shadow map back here return ["shadow"] elif self.object.getData().getType() == 2: # figure out how to render the direction this light is pointing return ["shadow"] else: return [] def initObjectData(self): self.objData = {} self.objData["name"] = self.object.getName() self.objData["type"] = self.object.getType() self.shader_type = "light" # figure out the type of lamp, and accordingly use the basic types available via BML shaderParms = {} lamp = self.object.getData() x = self.object.matrix[3][0] / self.object.matrix[3][3] y = self.object.matrix[3][1] / self.object.matrix[3][3] z = self.object.matrix[3][2] / self.object.matrix[3][3] tox = -self.object.matrix[2][0] + self.object.matrix[3][0] toy = -self.object.matrix[2][1] + self.object.matrix[3][1] toz = -self.object.matrix[2][2] + self.object.matrix[3][2] if lamp.getMode() & lamp.Modes['Negative']: negative = -1 else: negative = 1 if self.settings.use_slparams: ext = ".sl" else: ext = "." + self.settings.renderers[self.settings.renderer][4] # am I going to worry about intensity and color? Maybe not, because that will change per shader I think. # perhaps I should gather the parameters for the bml shader and use that as my primary lighting shader. shaderParms["intensity"] = lamp.getEnergy() # I'm only worried at the moment about deriving the correct shader to use as a starting point for the lamp. 
# thus shaderParms["from"] = [x, y, z] shaderParms["lightcolor"] = [lamp.R, lamp.G, lamp.B] self.objData["lightcolor"] = [lamp.R, lamp.G, lamp.B] if lamp.type == 0 or lamp.type == 4: self.object.RotX = 0.0 self.object.RotY = 0.0 self.object.RotZ = 0.0 self.objData["type"] = 0 energyRatio = lamp.dist * negative # get the first selected shader path...and hope it's setup correctly shaderPath = self.settings.getShaderSearchPaths()[0] if self.lighting.getProperty("GenShadowMaps"): sFilename = "shadowpoint" + ext else: sFilename = "pointlight" + ext self.objData["shaderfilename"] = os.path.normpath(shaderPath + os.sep + sFilename) # I'm only really concerned about this if I'm using sl params self.objData["shadername"] = "pointlight" shaderParms["intensity"] = (energyRatio * lamp.energy) * self.getProperty("Multiplier") elif lamp.type == 1: self.objData["type"] = 1 energyRatio = negative self.objData["shadername"] = "distantlight" shaderPath = self.settings.getShaderSearchPaths()[0] if self.lighting.getProperty("GenShadowMaps"): sFilename = "shadowdistant" + ext else: sFilename = "distantlight" + ext self.objData["shaderfilename"] = os.path.normpath(shaderPath + os.sep + sFilename) shaderParms["to"] = [ tox, toy, toz] shaderParms["intensity"] = (energyRatio * lamp.energy) * self.getProperty("Multiplier") elif lamp.type == 2: self.objData["type"] = 2 energyRatio = lamp.dist * negative shaderPath = self.settings.getShaderSearchPaths()[0] #if self.settings.useShadowMaps.getValue(): # sFilename = "shadowspot.sl" #else: if self.lighting.getProperty("GenShadowMaps"): sFilename = "shadowspot" + ext self.objData["shadername"] = "shadowspot" else: self.objData["shadername"] = "spotlight" sFilename = "spotlight" + ext self.objData["shaderfilename"] = os.path.normpath(shaderPath + os.sep + sFilename) # shaderParms["shadowbias"] = lamp.bias #shaderParms["blur"] = 0.0 #shaderParms["samples"] = lamp.samples shaderParms["coneangle"] = (lamp.spotSize * math.pi / 360) shaderParms["conedeltaangle"] = (lamp.spotBlend * (lamp.spotSize * math.pi / 360)) shaderParms["to"] = [tox, toy, toz] shaderParms["intensity"] = (energyRatio * lamp.energy) * self.lighting.getProperty("Multiplier") # This might need to be animated, so I need to add a function to deal with that if self.lighting.getProperty("GenShadowMaps"): shaderParms["shadowname"] = self.object.getName() + "shadow.tx" else: shaderParms["shadowname"] = None elif lamp.type == 3: self.objData["type"] = 3 energyRatio = negative self.objData["shadername"] = "hemilight" shaderPath = self.settings.getShaderSearchPaths()[0] self.objData["shaderfilename"] = os.path.normpath(shaderPath + os.sep + "hemilight" + ext) shaderParms["to"] = [tox, toy, toz] shaderParms["falloff"] = 0 shaderParms["intensity"] = (energyRatio * lamp.energy) * self.getProperty("Multiplier") self.objData["shaderparms"] = shaderParms self.initShader() # initialize my light shader def checkReset(self): # here I want to check if the lamp settings need changing or not. # if the user has selected a shader type that doesn't match the lamp settings, all I want to affect in that case # is the light color. 
shaderParms = self.objData["shaderparms"] lamp = self.object.getData() # for the most part, follow the parameters for the given light object and stuff the values into the # shader parms if self.getProperty("autoLighting"): mat = self.object.getMatrix() x = self.object.matrix[3][0] / self.object.matrix[3][3] y = self.object.matrix[3][1] / self.object.matrix[3][3] z = self.object.matrix[3][2] / self.object.matrix[3][3] tox = -self.object.matrix[2][0] + self.object.matrix[3][0] toy = -self.object.matrix[2][1] + self.object.matrix[3][1] toz = -self.object.matrix[2][2] + self.object.matrix[3][2] shaderParms["intensity"] = lamp.getEnergy() if self.objData["type"] == lamp.type: if lamp.getMode() & lamp.Modes['Negative']: negative = -1 else: negative = 1 shaderParms["from"] = [x, y, z] shaderParms["lightcolor"] = [lamp.R, lamp.G, lamp.B] self.objData["lightcolor"] = [lamp.R, lamp.G, lamp.B] if (lamp.type == 0 or lamp.type == 4) and (self.shader.getShaderName() == "pointlight" or self.shader.getShaderName() == "shadowpoint"): self.object.RotX = 0.0 self.object.RotY = 0.0 self.object.RotZ = 0.0 energyRatio = lamp.dist * negative # get the first selected shader path...and hope it's setup correctly shaderParms["intensity"] = (energyRatio * lamp.energy) * self.lighting.getProperty("Multiplier") elif lamp.type == 1 and (self.shader.getShaderName() == "distantlight" or self.shader.getShaderName() == "shadowdistant"): energyRatio = negative # self.objData["shadername"] = "distantlight" shaderParms["to"] = [ tox, toy, toz] elif lamp.type == 2 and (self.shader.getShaderName() == "shadowspot" or self.shader.getShaderName() == "spotlight"): energyRatio = lamp.dist * negative # shaderParms["shadowbias"] = lamp.bias #if self.shader.getShaderName() == "shadowspot": # shaderParms["blur"] = 0.0 # shaderParms["samples"] = lamp.samples shaderParms["coneangle"] = (lamp.spotSize * math.pi / 360) shaderParms["conedeltaangle"] = (lamp.spotBlend * (lamp.spotSize * math.pi / 360)) shaderParms["to"] = [tox, toy, toz] shaderParms["intensity"] = (energyRatio * lamp.energy) * self.lighting.getProperty("Multiplier") elif lamp.type == 3 and self.shader.getShaderName() == "hemilight": energyRatio = negative shaderParms["to"] = [tox, toy, toz] shaderParms["falloff"] = 0 shaderParms["intensity"] = energyRatio * lamp.energy self.objData["shaderparms"] = shaderParms # and reset the light color. 
self.objData["lightcolor"] = [lamp.R, lamp.G, lamp.B] for key in shaderParms: self.shader.setParamValue(key, shaderParms[key]) self.objData["reset"] = False else: self.initObjectData() self.objData["reset"] = True else: mat = self.object.getMatrix() x = self.object.matrix[3][0] / self.object.matrix[3][3] y = self.object.matrix[3][1] / self.object.matrix[3][3] z = self.object.matrix[3][2] / self.object.matrix[3][3] tox = -self.object.matrix[2][0] + self.object.matrix[3][0] toy = -self.object.matrix[2][1] + self.object.matrix[3][1] toz = -self.object.matrix[2][2] + self.object.matrix[3][2] if lamp.getMode() & lamp.Modes['Negative']: negative = -1 else: negative = 1 params = self.shader.shader.shaderparams if params.has_key("intensity"): energyRatio = lamp.dist * negative shaderParms["intensity"] = (energyRatio * lamp.energy) * self.getProperty("Multiplier") if params.has_key("from"): shaderParms["from"] = [x, y, z] if params.has_key("to"): shaderParms["to"] = [tox, toy, toz] if params.has_key("lightcolor"): shaderParms["lightcolor"] = [lamp.R, lamp.G, lamp.B] for key in shaderParms: self.shader.setParamValue(key, shaderParms[key]) self.objData["reset"] = False def getSelector(self): return self.objEditor.getSelector() class MBallAdapter(ObjectAdapter): protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRMBall]) def __init__(self, object): """ Initialize a metaball export adapter """ ObjectAdapter.__init__(self, object) def render(self): """ generate Renderman data for this object """ # create RiBlobbies or convert to mesh depending upon renderer target and support def initObjectData(self): """ Initialize BtoR object data for this object """ print self.object class CurveAdapter(ObjectAdapter): """ Curve export adapter """ protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRCurve]) def __init__(self, object): """ initialize a CurveAdapter """ ObjectAdapter.__init__(self, object) self.object = object def render(self): """ generate Renderman data for this object """ pass def renderArchive(self): ri.RiBegin(self.objData["archive"] + self.objData["name"] + ".rib") # this is pure geometry here #ri.RiAttributeBegin() #ri.RiTransformBegin() #self.renderCurveData() #ri.RiTransformEnd() #ri.RiAttributeEnd() #ri.RiEnd() def renderCurveData(self): """ renders curve data to RiCurve objects """ #curve = self.object.getData() #nVerts = len(curve.verts) def initObjectData(self): """ Initialize BtoR object data for this object """ # do some mesh-related stuff # all I'm concerned with at the moment is whether or not the mesh in question is subsurfed or not. I don't care about levels and what-not, # since I can grab that from the blender object itself. 
self.objData = {} self.objData["name"] = self.object.getName() self.objData["type"] = self.object.getType() #curveObject = self.object.getData() self.objData["material"] = "None" class SurfaceAdapter(ObjectAdapter): protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRSurf]) def __init__(self, object): """ Iniitialize a surface export adapter """ ObjectAdapter.__init__(self, object) #self.editorPanel.title = "Surface Export Settings:" def render(self): """ generate Renderman data for this object """ def initObjectData(self): """ Initialize BtoR object data for this object """ class CameraAdapter(ObjectAdapter): protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRCamera]) def __init__(self, object): ObjectAdapter.__init__(self, object) def render(self): """ generate Renderman data for this object """ shader = self.getProperty("shader").getObject() # self.objEditor.shaderButton.title = self.imagerShader.shader_menu.getValue() if self.settings.renderer != "Pixie": if self.getProperty("autoImager"): bWorld = Blender.World.GetCurrent() if bWorld != None: if bWorld.hor != [0, 0, 0]: ri.RiDeclare("bgcolor", "uniform color") ri.RiDeclare("background", "uniform color") iparams = { "bgcolor" : [bWorld.hor[0], bWorld.hor[1], bWorld.hor[2]], "background" : [bWorld.hor[0], bWorld.hor[1], bWorld.hor[2]] } ri.RiImager( "background", iparams ) elif shader.shader != None: if shader.getShaderName() != None and shader.getShaderName != "None Selected": shader.updateShaderParams() ishader = shader.shader if ishader.shadername != "" or ishader.shaderName() != None: ri.RiImager(ishader.shadername, ishader.params()) # and done scene = Blender.Scene.GetCurrent() render = scene.getRenderingContext() camera = self.object.getData() cType = camera.getType() ri.RiFormat(render.imageSizeX(), render.imageSizeY(), 1) # leave aspect at 1 for the moment if cType == 0: if render.imageSizeX() >= render.imageSizeY(): factor = render.imageSizeY() / float(render.imageSizeX()) else: factor = render.imageSizeX() / float(render.imageSizeY()) fov = 360.0 * math.atan((16 * factor) / camera.lens) / math.pi print "Using a ", fov, " degree Field of View" ri.RiProjection("perspective", "fov", fov) # Depth of field if self.properties["DOF"].getValue(): ri.RiDepthOfField(self.properties["fstop"].getValue(), self.properties["focallength"].getValue(), self.properties["focaldistance"].getValue()) # Depth of field done else: self.objData["scale"] = camera.getScale() ri.RiProjection("orthographic") # ri.RiFrameAspectRatio(factor) # Camera clipping ri.RiClipping(camera.getClipStart(), camera.getClipEnd()) # Viewpoint transform vecX = Blender.Mathutils.Vector(1, 0, 0) cmatrix = self.object.getInverseMatrix() sMat = Blender.Mathutils.ScaleMatrix(-1, 4, vecX) rMat = Blender.Mathutils.RotationMatrix(180, 4, "y") mat = cmatrix * sMat * rMat ri.RiTransform(mat) #ri.RiTranslate(0, 0, 1) def initObjectData(self): # object initia.ize """ Initialize BtoR object data for this object """ self.objData = {} self.objData["name"] = self.object.getName() self.objData["type"] = self.object.getType() self.shader_type = "imager" shaderParams = {} shaderPath = self.settings.getShaderSearchPaths()[0] # this is a potential deficiency self.objData["shaderfilename"] = os.path.normpath(shaderPath + os.sep + "background.sl") bWorld = Blender.World.GetCurrent() shaderParams["bgcolor"] = [bWorld.hor[0], bWorld.hor[1], bWorld.hor[2]] shaderParams["background"] = [bWorld.hor[0], bWorld.hor[1], bWorld.hor[2]] 
self.objData["shaderparams"] = shaderParams self.initShader() # shader should be initialized, so then self.shader.setParamValue("bgcolor",shaderParams["bgcolor"]) self.shader.setParamValue("background", shaderParams["bgcolor"]) class PreviewAdapter(ObjectAdapter): protocols.advise(instancesProvide=[IObjectAdapter], asAdapterForTypes=[BtoRPreview]) def __init__(self, object): self.object = object def initObjectData(self): # what does a preview object need? pass def getInfo(self): pass def loadData(self): pass def saveData(self): pass def render(self): # what do I do here? Just push out the same verts that I need # this is a special case anyway if self.isSubdiv: self.renderSubdivMesh() else: self.renderPointsPolygons() def renderPointsPolygons(self): """ Export Renderman PointsPolygons object. """ mesh = self.object.obj points = [] normals = [] for v in mesh.verts: points.append(v.co) normals.append(v.no) # print v.index Cs = range(len(mesh.verts)) # params = {"P":points} nfaces = len(mesh.faces) # print nfaces, " faces found." nverts = [] vertids = [] for face in mesh.faces: nverts.append(len(face.v)) if mesh.vertexColors == 1: if len(face.v) > 2: for vertIdx in range(len(face.v)): Cs[face.v[vertIdx].index] = face.col[vertIdx] # should actually average the vert color across if mesh.faceUV == 1: if len(face.v) > 2: for vertIdx in range(len(face.v)): st[face.v[vertIdx].index] = [face.uv[vertIdx][0], 1.0 - face.uv[vertIdx][1]] #else: #if len(mesh.faces[0].uv) != 0: # vtuv = [] # for vertIdx in range(len(face.v)): # uv = face.uv[vertIdx] # uv = uv[0], 1.0 - uv[1] # vertTexUV[face.v[vertIdx].index] = uv if mesh.vertexUV: pass fVerts = [] for v in face.v: vertids.append(v.index) fVerts.append(v.index) # print "Face verts: ", fVerts params = {"P":points, "N":normals} if mesh.faceUV == 1: #print st params["st"] = st elif mesh.vertexColors == 1: vCol = [] for vertCol in Cs: vCol.append([vertCol.r / 256.0, vertCol.g / 256.0, vertCol.b / 256.0]) #print Cs params["Cs"] = vCol else: #print vertTexUV # params["st"] = vertTexUV pass ri.RiPointsGeneralPolygons(nfaces*[1], nverts, vertids, params) def renderSubdivMesh(self): """ Export Subdivision mesh. """ mesh = self.object.obj points = [] normals = [] uv = [] faceModes = [] st = [] # get the verts by ID vertTexUV = [] for vert in mesh.verts: points.append(vert.co) normals.append(vert.no) if mesh.faceUV == 0: vertTexUV.append(0) nfaces = len(mesh.faces) nverts = [] vertids = [] Cs = range(len(mesh.verts)) # get the faces by vertex ID for face in mesh.faces: nverts.append(len(face.v)) if mesh.vertexColors == 1: if len(face.v) > 2: for vertIdx in range(len(face.v)): Cs[face.v[vertIdx].index] = face.col[vertIdx] # should actually average the vert color across if mesh.faceUV == 1: if len(face.v) > 2: for vertIdx in range(len(face.v)): st[face.v[vertIdx].index] = [face.uv[vertIdx][0], 1.0 - face.uv[vertIdx][1]] #else: # if len(mesh.faces[0].uv) != 0: # vtuv = [] # for vertIdx in range(len(face.v)): # uv = face.uv[vertIdx] # uv = uv[0], 1.0 - uv[1] # vertTexUV[face.v[vertIdx].index] = uv for vert in face.v: vertids.append(vert.index) # get the creases creases = {} # develop a list of creases based on crease value. 
for edge in mesh.edges: if edge.crease > 0: if edge.crease not in creases: creases[edge.crease] = [] creases[edge.crease].append([edge.v1.index, edge.v2.index]) creaselist = [] for crease in creases: # for each crease group, create a set of vertices and merge verts = [] i_set = Set() setlist = [] edgelist = creases[crease] for edge in edgelist: i_set.add(edge[0]) i_set.add(edge[1]) for item in i_set: set = Set() set.add(item) setlist.append(set) for edge in edgelist: seta = self.find_set(edge[0], setlist) if edge[1] not in seta: setb = self.find_set(edge[1], setlist) newset = self.merge_set(seta, setb) setlist.remove(seta) setlist.remove(setb) setlist.append(newset) # print "Creases for crease level: ", crease, " are ", setlist for item in setlist: creaselist.append([crease, item]) # this will add to the flat list of crease objects that I need. # don't forget reference geometry. I need the base mesh before lattice/armature transforms are applied to it. # I can probably disable all modifiers except for decimate and gather that mesh, # then turn them all back on (excepting subsurf) and gather *that* mesh # then I do # params["PRef"] = refPoints # for all the other stuff, I also need # params["Cs"] = vertColors # per vertex colors # params["Cs"] = vertCoors # or Face UV colors # params["FModes"] = faceModes # face display modes to pass to custom shaders tags = [] nargs = [] intargs = [] floatargs = [] for crease in creaselist: # print crease tags.append("crease") nargs.append(len(crease[1])) nargs.append(1) for item in crease[1]: intargs.append(item) val = (float(crease[0]) / 255) * 5.0 floatargs.append(val) # normalized currently for the Aqsis renderer tags.append("interpolateboundary") nargs.append(0) nargs.append(0) params = {"P":points, "N":normals} if mesh.faceUV == 1: params["st"] = st elif mesh.vertexColors == 1: vCol = [] for vertCol in Cs: vCol.append([vertCol.r / 256.0, vertCol.g / 256.0, vertCol.b / 256.0]) #print Cs params["Cs"] = vCol else: pass #params["st"] = vertTexUV if 1 == 2: print "nfaces: ", nfaces print "nverts: ", nverts print "vertids: ", vertids print "ntags: ", len(tags) print "tags: ", tags print "nargs: ", nargs print "intargs: ", intargs print "floatargs: ", floatargs print "params: ", params # and now to build the call if mesh.faceUV == 1: ri.RiDeclare("st", "facevarying float[2]") # declare ST just in case subdiv = ri.RiSubdivisionMesh("catmull-clark", nverts, vertids, tags, nargs, intargs, floatargs, params) # Object UIs class ObjectUI: """ Object editor panel """ protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRLattice, BtoRArmature, BtoRBasicObject, BtoREmpty, BtoRWave]) def __init__(self, obj): dict = globals() self.settings = dict["instBtoRSettings"] self.evt_manager = dict["instBtoREvtManager"] self.scene = dict["instBtoRSceneSettings"] self.materials = dict["instBtoRMaterials"] self.objecteditor = dict["instBtoRObjects"] self.grouplist = dict["instBtoRGroupList"] self.helpwindow = dict["instBtoRHelp"] self.lighting = dict["instBtoRLightManager"] self.mat_selector = self.materials.getSelector() self.editorPanel = ui.Panel(4, 70, 255, 320, "Empty Panel", "", None, False) self.editorPanel.titleColor = [255,255,255, 255] self.editorPanel.hasHeader = False self.editorPanel.cornermask = 0 self.editorPanel.shadowed = False self.editorPanel.outlined = False self.editorPanel.addElement(ui.Label(10, 25, "Object Properties", "Object Properties:", self.editorPanel, True)) self.scroller= ui.ScrollPane(10, 50, 240, 235, "Scroller", "Scroller", 
self.editorPanel, True) self.attButton = ui.Button(self.editorPanel.width - 90, 0, 80, 25, "Atts", "Attributes", 'small', self.editorPanel, True) self.attButton.registerCallback("release", self.showAttributes) # hovering panel on the right for renderer-specific attributes self.attributePanel = ui.Panel(self.editorPanel.width + 10, 0, 255, 320, "Atts", " Renderer Specific Attributes", self.editorPanel, True, fontsize = 'small') self.attributePanel.isVisible = False self.attributePanel.hasHeader = False self.attributePanel.shadowed = True self.attributePanel.outlined = True self.attributeScroller = ui.ScrollPane(5, 25, 245, 295, "Scroller", "Scroller", self.attributePanel, True) self.properties = {} self.editors = {} # setup renderer-specific options if self.__dict__.has_key("optionOrder"): for option in self.optionOrder: propertyName = self.options[option][0] propertyValue = self.options[option][1] # generate a list of option panels here and allow editing # create a property for each option self.properties[option] = IProperty(propertyValue) # 1st item is the property name, second item is the property initializer self.properties[option].setName(propertyName) self.properties[option].setWidth(self.scroller.width - 15) # takes up half the available space of the main pane self.editors[option] = IPropertyEditor(self.properties[option]) self.scroller.addElement(self.editors[option].getEditor()) # and that should be that. When this is discarded, all those go away self.editors[option].setParent(self.scroller) self.scroller.offset = 0 self.setupAttributes() self.settings.rendererListeners.append(self) def setupAttributes(self): self.rendererAtts = {} self.rendererAttEditors = {} # setup renderer attributes atts = self.settings.getRendererAttributes() for att in atts: self.rendererAtts[att] = IProperty(atts[att][1]) self.rendererAtts[att].setName(atts[att][0]) self.rendererAtts[att].setWidth(self.attributeScroller.width - 15) self.rendererAttEditors[att] = IPropertyEditor(self.rendererAtts[att]) self.attributeScroller.addElement(self.rendererAttEditors[att].getEditor()) self.rendererAttEditors[att].setParent(self.attributeScroller) self.attributeScroller.offset = 0 def updateAttributes(self): self.attributeScroller.clearElements() self.setupAttributes() def getEditor(self): """ get the object editor for this object. """ return self.editorPanel def reloadOptions(self): self.scroller.clearElements() # simple enough to reload everything from the options array for option in self.optionOrder: self.scroller.addElement(self.editors[option].getEditor()) def showAttributes(self, button): if self.attributePanel.isVisible: self.attributePanel.hide() else: self.attributePanel.show() class MeshUI(ObjectUI): object_output_options = ["Mesh", "Renderman Primitive", "RA Proxy", "RA Procedural"] mesh_output_options = ["basic", "SubDiv"] protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRMesh]) """ This is the object editor. Here you can assign materials, and set various options for the export. 
""" def __init__(self, obj): self.options = { "material":["material", BtoRMaterialType("None Selected")], "AutoCrease" : ["Automatic Creasing?", False], "MaxCreaseAngle" : ["Max AutoCrease Angle", 90.0], "RIBEntity" : ["Save as RIB Entity", False], "IncludeMats" : ["Include Materials in RIB Entity", True], "ShadowMats" : [ "Include Materials in Shadowmap", False], "RenderInShadowPass" : [ "Include object in Shadowmap", True], "GenEnvMaps" : ["Generate Environment Maps" , False], "MapType" : ["Environment Map Type:", { "Cubic": "cubic", "Spherical": "spherical"}], "RenderInEnvMaps" : ["Include object in Environment Maps", True], "EnvMapPixelFilter" : ["EnvMap Pixel Filter", {"box":"box", "triangle":"triangle", "catmull-rom":"catmull-rom", "sinc":"sinc", "gaussian":"gaussian"}], "EnvMapFilterX":["EnvMap Filter size X", 1], "EnvMapFilterY":["EnvMap Filter size Y", 1], "EnvMapSamplesX":["EnvMap Samples X", 1], "EnvMapSamplesY":["EnvMap Samples Y", 1], "EnvMapShadingRate" : ["EnvMapShadingRate", 1.0], "DefineAsObj" : ["Define as RiObject", False] , "InplaceInstance" : ["In-Place Instancing", False], "InstanceCount" : ["Number of Instances", 1], "AutoRandomize" : ["Auto Randomize Material", False], "OutputOptions" : ["Object Output Options:", {"Mesh" : "mesh", "Renderman Primitive" : "primtive", "RA Proxy" : "proxy", "RA Procedural" : "procedural" }], "Matte" : ["Treat as Matte", False], "Ignore" : ["Don't Export Object:", False], "Sides" : ["Sides", 1], "ShadingRate" : ["Shading Rate", 1.0], "DispBound" : ["Displacement Bound", 2.0], "DispCoords" : ["Disp Bound Coord sys", {"Object" : "object", "Shader":"shader", "NDC": "ndc", "World": "world"}], "ExportCs" : ["Export Vertex Colors(Cs)", True], "ExportSt" : ["Export Texture Coordinates(s/t)", True], "FacePatches":["Export Faces as Patches:", False], "interpolateBoundary":["Interpolate Subdiv Boundary:", True] } self.optionOrder = ["material", "OutputOptions", "AutoCrease", "MaxCreaseAngle", "RenderInShadowPass", "ShadowMats", "GenEnvMaps", "EnvMapPixelFilter", "EnvMapFilterX", "EnvMapFilterY", "EnvMapSamplesX", "EnvMapSamplesY", "EnvMapShadingRate", "RenderInEnvMaps", "Matte", "Ignore", "Sides", "ShadingRate", "DispBound", "ExportCs", "ExportSt", "FacePatches", "interpolateBoundary"] # preinitialize a material property ObjectUI.__init__(self, obj) # commented out temporarily. Will return to service as global object editor for *all* objects, not just meshes #self.exportButton = ui.Button(self.editorPanel.width - 185, self.editorPanel.height - 25, 180, 25, "Export", "Export Object", 'normal', self.editorPanel, True) #self.exportButton.registerCallback("release", self.showExport) #self.exportSettings = btor.BtoRMain.ExportSettings() #self.exportSettings.export_functions.append(self.objecteditor.exportSingleObject) # this should do the trick, but should apply to every object that's exportable standalone self.helpText = """ This is a test. Here are some lines. And more lines. and yet more! """ def showHelp(self, button): print "Here is my docstring!", self.__doc__ self.helpwindow.setText(self.helpText) self.evt_manager.addElement(self.helpwindow.getEditor()) def showExport(self, obj): """ Display the mesh object export dialog. 
""" self.evt_manager.addElement(self.exportSettings.getEditor()) def setArchive(self, obj): if obj.getValue(): self.includeMaterials.isVisible = True else: self.includeMaterials.isVisible = False def setDefine(self, obj): if obj.getValue(): self.defineObjectVisGroup.show() else: self.defineObjectVisGroup.hide() class LampUI(ObjectUI): protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRLamp]) def __init__(self, obj): self.options = { "globalLight":["Light is global:", False], "transformLight":["Use Light Transform:", False], "autoLighting":["Automatic Lighting:", True], "IncludeWithAO" : ["Include light with AO?", False], "Multiplier" : ["Multiplier", 1.0], "GenShadowMap" : ["Render Shadow Maps:", {"Lazy" : "lazy", "Always" : "always", "Never" : "never"}, "lazy"], "ShadowMapSize" : ["Shadow Map Size", { "256" : 256, "512" : 512, "1024" : 1024, "2048" : 2048 }, "256"], "ShadowMapEyeSplits" : ["Max Eyesplits for Map", 5], "DepthFilter" : ["Midpoint Depthfilter?", True], "ShadowmapSamples" : ["ShadowMap Samples:", 1], "ShadowMapJitter" : ["ShadowMap Jitter:", 0.0], "ShadowMapWindow" : ["ScreenWindow Size:", {"5" : 5, "10" : 10, "15": 15, "20" : 20, "50" : 50, "100" : 100 }, "5"], "ShowZBuffer" : ["Show Z Buffer?", False], "Group" : ["Occlusion Group:", {"None Selected":"none Selected"}, "None Selected"]} self.optionOrder = ["globalLight", "autoLighting", "GenShadowMap", "ShadowMapSize", "ShadowMapJitter", "ShadowMapEyeSplits", "Multiplier", "IncludeWithAO", "transformLight", "DepthFilter", "ShadowMapWindow", "ShowZBuffer"] # occlusion group property here # "Group", ObjectUI.__init__(self, obj) # assign custom stuff to any properties that need it #self.lighting.occlListeners.append(self.editors["Group"]) #self.editors["Group"].updateMenu(self.lighting.occlusion_menu) # to catch any stragglers def getSelector(self): return self.selector def showShader(self): if self.shaderPanel.isVisible: self.shaderPanel.isVisible = False else: self.shaderPanel.x = self.editorPanel.parent.width + 15 self.shaderPanel.isVisible = True def setShader(self, shader): self.options["shader"] = ["Light Shader:", BtoRShaderType(shader)] self.properties["shader"] = IProperty(self.options["shader"][1]) self.properties["shader"].setWidth(self.scroller.width - 15) self.properties["shader"].setName("Light Shader:") self.optionOrder.insert(0, "shader") self.editors["shader"] = IPropertyEditor(self.properties["shader"]) self.editors["shader"].setParent(self.scroller) self.properties["shader"].getValue().getObject().obj_parent = self self.shaderPanel = self.properties["shader"].getValue().getObject().getEditor() self.shaderPanel.parent = self.editorPanel self.editorPanel.addElement(self.shaderPanel) self.shaderPanel.isVisible = False self.shaderPanel.shadowed = True self.shaderPanel.outlined = True self.shaderPanel.hasHeader = False self.shaderPanel.invalid = True self.shaderPanel.validate() self.reloadOptions() class MBallUI(ObjectUI): protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRMBall]) def __init__(self, obj): ObjectUI.__init__(self, obj) class CurveUI(ObjectUI): """ A UI for the curve type """ protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRCurve]) def __init__(self,obj): self.options = { "material":["material", BtoRMaterialType("None Selected")], "width" : ["Curve width:", 1.0], "wrap" : ["Wrap:", {"Periodic" : "periodic", "Non-Periodic" : "nonperiodic" }], "interp" : ["Interpolation:", { "Linear" : "linear", "Cubic" : "cubic" }] } self.optionOrder = 
["material", "width", "wrap", "interp"] ObjectUI.__init__(self, obj) class SurfaceUI(ObjectUI): """ A UI for the surface type """ protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRSurf]) def __init__(self, obj): ObjectUI.__init__(self, obj) class CameraUI(ObjectUI): """ A UI for the camera type """ protocols.advise(instancesProvide=[IObjectUI], asAdapterForTypes=[BtoRCamera]) def __init__(self, obj): self.options = { "DOF" : ["Use Depth of Field?", False], "autoImager" : ["Automatic Background:", True], "fstop" : ["F-stop", 22], "focallength" : ["Focal Length", 45], "focaldistance" : ["Focal Distance:", 10] } self.optionOrder = ["autoImager", "DOF", "fstop", "focallength", "focaldistance"] ObjectUI.__init__(self, obj) #self.editorPanel.addElement(ui.Label(10, 30, "Imager Shader:", "Imager Shader:", self.editorPanel, False)) #self.shaderButton = ui.Button(self.editorPanel.get_string_width("Imager Shader:", 'normal') + 15, 30, 125, 25, "Imager Shader", "None Selected", 'normal', self.editorPanel, True) # self.shaderButton.registerCallback("release", self.shader.showEditor) def setShader(self, shader): self.options["shader"] = ["Imager Shader:", BtoRShaderType(shader)] self.properties["shader"] = IProperty(self.options["shader"][1]) self.properties["shader"].setWidth(self.scroller.width - 15) self.properties["shader"].setName("Imager Shader:") self.optionOrder.insert(0, "shader") self.editors["shader"] = IPropertyEditor(self.properties["shader"]) self.editors["shader"].setParent(self.scroller) self.properties["shader"].getValue().getObject().obj_parent = self # shader panel setup self.shaderPanel = self.properties["shader"].getValue().getObject().getEditor() self.shaderPanel.parent = self.editorPanel self.editorPanel.addElement(self.shaderPanel) self.shaderPanel.isVisible = False self.shaderPanel.shadowed = True self.shaderPanel.outlined = True self.shaderPanel.hasHeader = False self.shaderPanel.invalid = True self.shaderPanel.validate() self.reloadOptions() def showShader(self): if self.shaderPanel.isVisible: self.shaderPanel.isVisible = False else: self.shaderPanel.x = self.editorPanel.parent.width + 15 self.shaderPanel.isVisible = True # material derivative UIs class ShaderParamUI: protocols.advise(instancesProvide=[IShaderParamUI], asAdapterForTypes=[BtoRStringParam, BtoRArrayParam, BtoRMatrixParam]) def __init__(self, obj): self.materialName = obj.getMaterialName() self.shader = obj.getShader() self.parameter = obj.getParameter() self.type = getType() self.editorPanel = ui.Panel(0, 0, 100, 300, "Parameter: %s" % parameter, "Parameter: %s" % parameter, None, False) if type in ["String", "Matrix", "Array"]: self.editorPanel.addElement(ui.Label(10, 25, "None", "This parameter type has no editor defined yet.", self.editorPanel, False)) def getEditor(self): return self.editorPanel def getVariance(self, index): pass def calcRange(self, button): pass def setRangeLength(self, length): pass class FloatShaderParamUI(ShaderParamUI): protocols.advise(instancesProvide=[IShaderParamUI], asAdapterForTypes=[BtoRFloatParam]) def __init__(self, obj): ShaderParamUI.__init__(self, obj) self.materialName = obj.getMaterial() self.shader = obj.getShader() self.parameter = obj.getParameter() self.type = getType() self.rangeLength = rangeLength self.editorPanel.addElement(ui.Label(10, 25, "Range start:", "Range start:", self.editorPanel, False)) self.rangeStart = ui.TextField(85, 25, 85, 25, "RangeStart", 0.0, self.editorPanel, True) self.rangeStart.registerCallback("update", self.calcRange) 
self.editorPanel.addElement(ui.Label(10, 60, "Range end:", "Range end:", self.editorPanel, False)) self.rangeEnd = ui.TextField(85, 60, 85, 25, "RangeEnd", 1.0, self.editorPanel, True) self.rangeEnd.registerCallback("update", self.calcRange) self.linearDist = ui.CheckBox(10, 85, "Linear Values", "Linear Values", True, self.editorPanel, True) self.randomDist = ui.CheckBox(10, 110, "Random Distribution", "Random values", False, self.editorPanel, True) self.editorPanel.addElement(ui.Label(10, 125, "Random Seed:", "Random Seed:", self.editorPanel, False)) self.rSeed = ui.TextField(85, 125, 85, 25, "Random seed", 1, self.editorPanel, True) self.editorPanel.addElement(ui.Label(10, 140, "Increment:", "Increment", self.editorPanel, False)) self.increment = ui.TextField(85, 140, 85, 25, "Increment", 0.5, self.editorPanel, True) self.vRange = range(rangeLength) def randomize(self): # randomize the value for this item return random.uniform(self.rangeStart, self.rangeEnd) def getVariance(self, index): return self.vRange(index) def calcRange(self, button): start = float(self.rangeStart.getValue()) end = float(self.rangeEnd.getValue()) length = end - start inc = length / len(self.vRange) val = start if self.linearDist.getValue(): for idx in range(len(self.vRange)): self.vRange[idx] = val val = val + inc else: # initialize the seed value. I need this so I can always get back the same information after a material's been saved and restored random.seed(float(self.rSeed.getValue())) for idx in range(len(self.vRange)): self.vRange[idx] = random.uniform(start, end) def setRangeLength(self, length): self.rangeLength = length self.vRange = range(length) class VecShaderParamUI(ShaderParamUI): protocols.advise(instancesProvide=[IShaderParamUI], asAdapterForTypes=[BtoRColorParam, BtoRPointParam, BtoRVectorParam, BtoRNormalParam]) def __init__(self, obj): ShaderParamUI.__init__(self, obj) def setup(self, materialName, shader, type, parameter, rangeLength): width = 45 if type == "color": a = "R:" b = "G:" c = "B:" else: a = "X:" b = "Y:" c = "Z:" self.editorPanel.addElement(ui.Label(25, 0, "X Range start:", "Range start:", self.editorPanel, False)) self.editorPanel.addElement(ui.Label(90, 80, "X Range end:", "Range end:", self.editorPanel, False)) self.editorPanel.addElement(ui.Label(10, 25, "X", a, self.editorPanel, False)) self.xRangeStart = ui.TextField(25, 25, width, 25, "X RangeStart", 0.0, self.editorPanel, True) self.xRangeStart.registerCallback("update", self.calcRange) self.xRangeEnd = ui.TextField(90, 25,width, 25, "X RangeEnd", 1.0, self.editorPanel, True) self.xRangeEnd.registerCallback("update", self.calcRange) self.xDelta = ui.CheckBox(110, 25, "Delta", "Delta", False, self.editorPanel, True) self.editorPanel.addElement(ui.Label(10, 60, "Y", b, self.editorPanel, True)) self.yRangeStart = ui.TextField(25, 60, width, 25, "Y RangeStart", 0.0, self.editorPanel, True) self.yRangeStart.registerCallback("update", self.calcRange) self.yRangeEnd = ui.TextField(90, 60, width, 25, "Y RangeEnd", 1.0, self.editorPanel, True) self.yRangeEnd.registerCallback("update", self.calcRange) self.yDelta = ui.CheckBox(110, 60, "Delta", "Delta", False, self.editorPanel, True) self.editorPanel.addElement(ui.Label(10, 95, "Z:", c, self.editorPanel, True)) self.zRangeStart = ui.TextField(25, 95, width, 25, "X RangeStart", 0.0, self.editorPanel, True) self.zRangeStart.registerCallback("update", self.calcRange) self.zRangeEnd = ui.TextField(90, 95, width, 25, "X RangeEnd", 1.0, self.editorPanel, True) 
        self.zRangeEnd.registerCallback("update", self.calcRange)
        self.zDelta = ui.CheckBox(110, 95, "Delta", "Delta", False, self.editorPanel, True)
        self.linearDist = ui.CheckBox(10, 115, "Linear Values", "Linear Values", True, self.editorPanel, True)
        self.randomDist = ui.CheckBox(10, 140, "Random Distribution", "Random values", False, self.editorPanel, True)
        self.editorPanel.addElement(ui.Label(10, 155, "Random Seed:", "Random Seed:", self.editorPanel, False))
        self.rSeed = ui.TextField(85, 155, width, 25, "Random seed", 1, self.editorPanel, True)
        self.vRange = range(rangeLength)
        for idx in range(rangeLength):
            self.vRange[idx] = []  # should I return a vec3 here?

    def getVariance(self, index):
        # index the precomputed range rather than calling it
        return self.vRange[index]

    def calcRange(self, button):
        # the vector editor only exposes per-axis fields; the original referenced
        # nonexistent self.rangeStart/self.rangeEnd, so use the X-axis fields as
        # the range source (per-axis ranges are still not applied here)
        start = float(self.xRangeStart.getValue())
        end = float(self.xRangeEnd.getValue())
        length = end - start
        inc = length / len(self.vRange)
        val = start
        if self.linearDist.getValue():
            for idx in range(len(self.vRange)):
                self.vRange[idx] = val
                val = val + inc
        else:
            # initialize the seed value. I need this so I can always get back the
            # same information after a material's been saved and restored
            random.seed(float(self.rSeed.getValue()))
            for idx in range(len(self.vRange)):
                vec = []
                vec.append(random.uniform(start, end))
                vec.append(random.uniform(start, end))
                vec.append(random.uniform(start, end))
                self.vRange[idx] = vec

    def setRangeLength(self, length):
        self.vRange = range(length)

# BtoR-Specific objects
karstenda/aqsis
tools/integration/BtoR/BtoRAdapterClasses.py
Python
gpl-2.0
125137
[ "Gaussian" ]
68916432a563ca41ae4330b7bd07164eb38d1e5a15d677ca2aaa070b910538a1
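The BtoR record above groups creased edges into connected vertex components with find_set/merge_set before emitting "crease" tags to RiSubdivisionMesh. Below is a minimal, self-contained sketch of that grouping step, assuming plain (v1, v2) vertex-index pairs that share one crease value; it needs no Blender or RenderMan bindings, and the function name is our own, not BtoR's.

# Sketch of the crease-grouping used in renderSubdivMesh above: merge the
# vertex sets of edges that share a crease value into connected components.
def group_crease_edges(edges):
    # one singleton set per vertex that appears in any creased edge
    setlist = [{v} for v in {v for edge in edges for v in edge}]

    def find_set(val):
        for s in setlist:
            if val in s:
                return s

    for a, b in edges:
        sa = find_set(a)
        if b not in sa:
            sb = find_set(b)
            setlist.remove(sa)
            setlist.remove(sb)
            setlist.append(sa | sb)  # merge the two components
    return setlist

if __name__ == "__main__":
    # two chains, 0-1-2 and 5-6, yield two crease groups
    print(group_crease_edges([(0, 1), (1, 2), (5, 6)]))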
import vtk from vtk.util import numpy_support as ns import numpy as np from six.moves import range from .tractography import Tractography from functools import reduce def tractography_from_vtk_files(vtk_file_names): tr = Tractography() if isinstance(vtk_file_names, str): vtk_file_names = [vtk_file_names] for file_name in vtk_file_names: tracts = read_vtkPolyData(file_name) tr.append(tracts.tracts(), tracts.tracts_data()) return tr def tractography_to_vtk_file(vtk_file_name, tractography): return write_vtkPolyData( vtk_file_name, tractography.tracts(), tractography.tracts_data() ) def read_vtkPolyData(filename): r''' Reads a VTKPolyData file and outputs a tracts/tracts_data pair Parameters ---------- filename : str VTKPolyData filename Returns ------- tracts : list of float array N_ix3 Each element of the list is a tract represented as point array, the length of the i-th tract is N_i tract_data : dict of <data name>= list of float array of N_ixM Each element in the list corresponds to a tract, N_i is the length of the i-th tract and M is the number of components of that data type. ''' if filename.endswith('xml') or filename.endswith('vtp'): polydata_reader = vtk.vtkXMLPolyDataReader() else: polydata_reader = vtk.vtkPolyDataReader() polydata_reader.SetFileName(filename) polydata_reader.Update() polydata = polydata_reader.GetOutput() return vtkPolyData_to_tracts(polydata) def vtkPolyData_to_tracts(polydata, return_tractography_object=True): r''' Reads a VTKPolyData object and outputs a tracts/tracts_data pair Parameters ---------- polydata : vtkPolyData VTKPolyData Object Returns ------- tracts : list of float array N_ix3 Each element of the list is a tract represented as point array, the length of the i-th tract is N_i tract_data : dict of <data name>= list of float array of N_ixM Each element in the list corresponds to a tract, N_i is the length of the i-th tract and M is the number of components of that data type. ''' result = {} result['lines'] = ns.vtk_to_numpy(polydata.GetLines().GetData()) result['points'] = ns.vtk_to_numpy(polydata.GetPoints().GetData()) result['numberOfLines'] = polydata.GetNumberOfLines() data = {} if polydata.GetPointData().GetScalars(): data['ActiveScalars'] = polydata.GetPointData().GetScalars().GetName() if polydata.GetPointData().GetVectors(): data['ActiveVectors'] = polydata.GetPointData().GetVectors().GetName() if polydata.GetPointData().GetTensors(): data['ActiveTensors'] = polydata.GetPointData().GetTensors().GetName() for i in range(polydata.GetPointData().GetNumberOfArrays()): array = polydata.GetPointData().GetArray(i) np_array = ns.vtk_to_numpy(array) if np_array.ndim == 1: np_array = np_array.reshape(len(np_array), 1) data[polydata.GetPointData().GetArrayName(i)] = np_array result['pointData'] = data tracts, data = vtkPolyData_dictionary_to_tracts_and_data(result) if return_tractography_object: tr = Tractography() tr.append(tracts, data) return tr else: return tracts, data def vtkPolyData_dictionary_to_tracts_and_data(dictionary): r''' Create a tractography from a dictionary organized as a VTK poly data. Parameters ---------- dictionary : dict Dictionary containing the elements for a tractography points : array Nx3 of float each element is a point in RAS space lines : Mx1 of int The array is organized as: K, ix_1, ..., ix_k, L, ix_1, ..., ix_L For instance the array [4, 0, 1, 2, 3] means that that line is formed by the sequence of points 0, 1, 2 and 3 on the points array. 'numberOfLines' : int The total number of lines in the array. 
Returns ------- tracts : list of float array N_ix3 Each element of the list is a tract represented as point array, the length of the i-th tract is N_i tract_data : dict of <data name>= list of float array of N_ixM Each element in the list corresponds to a tract, N_i is the length of the i-th tract and M is the number of components of that data type. ''' dictionary_keys = set(('lines', 'points', 'numberOfLines')) if not dictionary_keys.issubset(dictionary.keys()): raise ValueError("Dictionary must have the keys lines and points" + repr( dictionary.keys())) # Tracts and Lines are the same thing tract_data = {} tracts = [] lines = np.asarray(dictionary['lines']).squeeze() points = dictionary['points'] actual_line_index = 0 number_of_tracts = dictionary['numberOfLines'] original_lines = [] for l in range(number_of_tracts): tracts.append( points[ lines[ actual_line_index + 1: actual_line_index + lines[actual_line_index] + 1 ] ] ) original_lines.append( np.array( lines[ actual_line_index + 1: actual_line_index + lines[actual_line_index] + 1], copy=True )) actual_line_index += lines[actual_line_index] + 1 if 'pointData' in dictionary: point_data_keys = [ it[0] for it in dictionary['pointData'].items() if isinstance(it[1], np.ndarray) ] for k in point_data_keys: array_data = dictionary['pointData'][k] if not k in tract_data: tract_data[k] = [ array_data[f] for f in original_lines ] else: np.vstack(tract_data[k]) tract_data[k].extend( [ array_data[f] for f in original_lines[-number_of_tracts:] ] ) return tracts, tract_data def vtkPolyData_to_lines(polydata): lines_ids = ns.vtk_to_numpy(polydata.GetLines().GetData()) points = ns.vtk_to_numpy(polydata.GetPoints().GetData()) lines = [] lines_indices = [] actual_line_index = 0 for i in range(polydata.GetNumberOfLines()): next_line_index = actual_line_index + lines_ids[actual_line_index] + 1 lines_indices.append(lines_ids[actual_line_index + 1: next_line_index]) lines.append(points[lines_indices[-1]]) actual_line_index = next_line_index point_data = {} for i in range(polydata.GetPointData().GetNumberOfArrays()): vtk_array = polydata.GetPointData().GetArray(i) array_data = ns.vtk_to_numpy(vtk_array) if array_data.ndim == 1: data = [ ns.numpy.ascontiguousarray(array_data[line_indices][:, None]) for line_indices in lines_indices ] else: data = [ ns.numpy.ascontiguousarray(array_data[line_indices]) for line_indices in lines_indices ] point_data[vtk_array.GetName()] = data scalars = polydata.GetPointData().GetScalars() if scalars is not None: point_data['ActiveScalars'] = scalars.GetName() vectors = polydata.GetPointData().GetVectors() if vectors is not None: point_data['ActiveVectors'] = vectors.GetName() tensors = polydata.GetPointData().GetTensors() if tensors is not None: point_data['ActiveTensors'] = tensors.GetName() return lines, lines_indices, point_data def tracts_to_vtkPolyData(tracts, tracts_data={}, lines_indices=None): if isinstance(tracts, Tractography): tracts_data = tracts.tracts_data() tracts = tracts.tracts() lengths = [len(p) for p in tracts] line_starts = ns.numpy.r_[0, ns.numpy.cumsum(lengths)] if lines_indices is None: lines_indices = [ ns.numpy.arange(length) + line_start for length, line_start in zip(lengths, line_starts) ] ids = ns.numpy.hstack([ ns.numpy.r_[c[0], c[1]] for c in zip(lengths, lines_indices) ]) vtk_ids = ns.numpy_to_vtkIdTypeArray(ids, deep=True) cell_array = vtk.vtkCellArray() cell_array.SetCells(len(tracts), vtk_ids) points = ns.numpy.vstack(tracts).astype( ns.get_vtk_to_numpy_typemap()[vtk.VTK_DOUBLE] ) points_array 
= ns.numpy_to_vtk(points, deep=True) poly_data = vtk.vtkPolyData() vtk_points = vtk.vtkPoints() vtk_points.SetData(points_array) poly_data.SetPoints(vtk_points) poly_data.SetLines(cell_array) saved_keys = set() for key, value in tracts_data.items(): if key in saved_keys: continue if key.startswith('Active'): saved_keys.add(value) name = value value = tracts_data[value] else: name = key if len(value) == len(tracts): if value[0].ndim == 1: value_ = ns.numpy.hstack(value)[:, None] else: value_ = ns.numpy.vstack(value) elif len(value) == len(points): value_ = value else: raise ValueError( "Data in %s does not have the correct number of items") vtk_value = ns.numpy_to_vtk( np.ascontiguousarray(value_, dtype=ns.get_vtk_to_numpy_typemap()[vtk.VTK_FLOAT]), deep=True ) vtk_value.SetName(name) if key == 'ActiveScalars' or key == 'Scalars_': poly_data.GetPointData().SetScalars(vtk_value) elif key == 'ActiveVectors' or key == 'Vectors_': poly_data.GetPointData().SetVectors(vtk_value) elif key == 'ActiveTensors' or key == 'Tensors_': poly_data.GetPointData().SetTensors(vtk_value) else: poly_data.GetPointData().AddArray(vtk_value) poly_data.BuildCells() return poly_data def write_vtkPolyData(filename, tracts, tracts_data={}): poly_data = tracts_to_vtkPolyData(tracts, tracts_data=tracts_data) if filename.endswith('.xml') or filename.endswith('.vtp'): writer = vtk.vtkXMLPolyDataWriter() writer.SetDataModeToBinary() else: writer = vtk.vtkPolyDataWriter() writer.SetFileTypeToBinary() writer.SetFileName(filename) if hasattr(vtk, 'VTK_MAJOR_VERSION') and vtk.VTK_MAJOR_VERSION > 5: writer.SetInputData(poly_data) else: writer.SetInput(poly_data) writer.Write() def writeLinesToVtkPolyData_pure_python(filename, lines, point_data={}): file_ = open(filename, 'w') file_.write(__header__) file_.write(__polyDataType__) number_of_points = sum([len(line) for line in lines]) file_.write(__points_header__(number_of_points)) for line in lines: for point in line: file_.write(str(point).strip()[1:-1] + '\n') number_of_lines = len(lines) file_.write(__lines_header__(number_of_lines, number_of_points)) points_for_line_saved = 0 for line in lines: file_.write( "%d %s \n" % ( len(line), reduce( lambda x, y: x + ' %d' % (y + points_for_line_saved), range(len(line)), '' ) )) points_for_line_saved += len(line) if point_data: file_.write(__point_data_header__(number_of_points)) active_keys = write_active_components(file_, point_data) write_field_data(file_, number_of_points, active_keys, point_data) file_.flush() file_.close() def get_number_of_components(data): if hasattr(data[0], 'shape'): if len(data[0].shape) == 0: return 1 return data[0].shape[-1] if hasattr(data[0][0], '__len__'): return len(data[0][0]) else: return 1 def write_active_components(file_, point_data): active_keys = [] for type_, fixed_number_of_components in [ ('Scalars', None), ('Vectors', 3), ('Tensors', 9) ]: active_tag = 'Active' + type_ if active_tag in point_data: name = point_data[active_tag] active_keys.append(name) data = point_data[name] number_of_components = get_number_of_components(data) if ( (fixed_number_of_components is not None) and (fixed_number_of_components != number_of_components) ): raise ValueError( "Active %s don't have %d components, it has %d" % ( type_, fixed_number_of_components, number_of_components) ) if type_ == 'Scalars': file_.write(__point_data_attribute_header__( type_.upper(), name, number_of_components) ) file_.write('LOOKUP_TABLE default\n') else: file_.write(__point_data_attribute_header__( type_.upper(), name)) 
            write_line_data(file_, data)
    return active_keys


def write_field_data(file_, number_of_points, active_keys, point_data):
    keys = (
        set(point_data.keys()) -
        set(active_keys) -
        set([key for key, data in point_data.items() if isinstance(data, str)])
    )
    if not keys:
        return
    file_.write(__field_data_header__(len(keys)))
    for key in keys:
        data = point_data[key]
        name = key
        number_of_components = get_number_of_components(data)
        if sum(len(d) for d in data) != number_of_points:
            raise ValueError(
                "Attribute %s does not have a tuple per point in the line" % key)
        file_.write(
            __field_data_attribute_header__(
                name, number_of_components, number_of_points
            )
        )
        write_line_data(file_, data)


def write_line_data(file_, data):
    for line in data:
        for attribute in line:
            file_.write(
                str(attribute).replace('[', '')
                .replace(']', '')
                .replace(', ', ' ')
                .strip() + '\n'
            )


def tractography_from_vtkPolyData(polydata):
    tractography = Tractography()
    tractography._originalFibers = []
    tractography._tractData = {}
    tractography._originalLines = []
    tractography._originalData = {}
    tractography._tracts = []
    lines, lines_ids, point_data = vtkPolyData_to_lines(polydata)
    tractography._tracts = lines
    tractography._tractData = point_data
    tractography._originalFibers = np.vstack(lines)
    tractography._originalLines = lines_ids
    # iterate over the dict items (not the dict itself) when stacking per-tract data
    tractography._originalData = dict(
        (key, np.vstack(value))
        for key, value in tractography._tractData.items()
    )


# a legacy VTK file must open with the version banner on its first line
__header__ = """# vtk DataFile Version 3.0
vtk output
ASCII
"""

__polyDataType__ = "DATASET POLYDATA\n"


def __points_header__(number_of_points):
    return "POINTS %d float\n" % number_of_points


def __lines_header__(number_of_lines, number_of_points):
    return "LINES %d %d\n" % (number_of_lines, number_of_lines + number_of_points)


def __point_data_header__(number_of_points):
    return "POINT_DATA %d\n" % number_of_points


def __point_data_attribute_header__(type_, name, number_of_components=0):
    # "%.d" renders 0 as an empty string, so SCALARS lines can omit a zero count
    return "%s %s float %.d\n" % (type_, name, number_of_components)


def __field_data_header__(number_of_arrays):
    return "FIELD FieldData %d\n" % number_of_arrays


def __field_data_attribute_header__(
    array_name='', number_of_components=1, number_of_points=1,
    data_type='float'
):
    # the conversion specifiers must not embed stray spaces
    return "%s %d %d %s\n" % (
        array_name, number_of_components, number_of_points, data_type
    )
demianw/tract_querier
tract_querier/tractography/vtkInterface.py
Python
bsd-3-clause
16,188
[ "VTK" ]
d9858e48230c6570d9f6cf1ec2636a3b0f58210a2b2704af3782902ef519b410
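A minimal usage sketch for the writer above. It assumes the module is importable under its repository path (tract_querier/tractography/vtkInterface.py) and that each tract is an (n_points, 3) NumPy float array; the array contents and the 'FA' data key are illustrative only:

import numpy as np
from tract_querier.tractography import vtkInterface

# Two short streamlines, each an (n_points, 3) array of coordinates.
tracts = [np.random.rand(5, 3).astype(np.float32),
          np.random.rand(8, 3).astype(np.float32)]
# One scalar per point, stored as one array per tract.
tracts_data = {'FA': [np.random.rand(5), np.random.rand(8)]}

# A '.vtp' suffix selects the XML writer; anything else uses the legacy
# binary vtkPolyDataWriter.
vtkInterface.write_vtkPolyData('bundle.vtp', tracts, tracts_data=tracts_data)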
# -*- coding: utf-8 -*- """ Test helper functions and base classes. """ from .ga_helpers import GaccoTestMixin, SUPER_USER_INFO, GA_COURSE_SCORER_USER_INFO, GA_GLOBAL_COURSE_CREATOR_USER_INFO from ..pages.lms.auto_auth import AutoAuthPage from ..pages.lms.ga_instructor_dashboard import MembershipPageMemberListSection, InstructorDashboardPage class GaccoTestRoleMixin(GaccoTestMixin): def add_course_role(self, course_id, role_name, member): self.switch_to_user(SUPER_USER_INFO) instructor_dashboard_page = InstructorDashboardPage(self.browser, course_id).visit() instructor_dashboard_page.select_membership() MembershipPageMemberListSection(self.browser).wait_for_page().add_role_by_display_name(role_name, member) self.logout() def auto_auth_with_ga_global_course_creator(self, course_id): # Auto-auth register for the course AutoAuthPage( self.browser, username=GA_GLOBAL_COURSE_CREATOR_USER_INFO['username'], password=GA_GLOBAL_COURSE_CREATOR_USER_INFO['password'], email=GA_GLOBAL_COURSE_CREATOR_USER_INFO['email'], course_id=course_id ).visit() return GA_GLOBAL_COURSE_CREATOR_USER_INFO def auto_auth_with_ga_course_scorer(self, course_id): self.add_course_role(course_id, 'Course Scorer', GA_COURSE_SCORER_USER_INFO['email']) AutoAuthPage( self.browser, username=GA_COURSE_SCORER_USER_INFO['username'], password=GA_COURSE_SCORER_USER_INFO['password'], email=GA_COURSE_SCORER_USER_INFO['email'], course_id=course_id ).visit() return GA_COURSE_SCORER_USER_INFO
nttks/edx-platform
common/test/acceptance/tests/ga_role_helpers.py
Python
agpl-3.0
1,707
[ "VisIt" ]
986008b3beba4cc6c4e728d2d3dfd47f06df18ac04b3dfe727f44da6fef77d81
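A hedged sketch of how the mixin above is meant to be used from an acceptance test. The test class, the bok-choy base class, and the course key below are illustrative and not part of the original module:

from common.test.acceptance.tests.ga_role_helpers import GaccoTestRoleMixin

# WebAppTest is the assumed bok-choy base class used elsewhere in the suite;
# it is not imported here because the exact path is not shown in the source.
class ExampleRoleTest(GaccoTestRoleMixin, WebAppTest):
    def test_course_scorer_login(self):
        course_id = 'course-v1:org+course+run'  # hypothetical course key
        # Adds the 'Course Scorer' role via the instructor dashboard,
        # then auto-authenticates as that user into the course.
        user_info = self.auto_auth_with_ga_course_scorer(course_id)
        self.assertIn('username', user_info)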
import argparse import json from collections import defaultdict from multiprocessing.dummy import Pool import requests import logging from ya_courier_helpers.util import chunks, get_duplicates, post_request, \ get_request, delete_request, get_mvrp_request, get_mvrp_solution, valid_date FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO) MAX_REF_LEN = 80 def get_routes(date): routes = get_request('routes?date={}'.format(date)) return {x['number']: x for x in routes} def get_orders(route_id): orders = get_request('orders?route_id={}'.format(route_id)) return {x['number']: x for x in orders} def upload_depot(depot, depot_address): data = { 'number': depot.get('ref', str(depot['id'])), 'name': depot.get('ref', str(depot['id'])), 'address': depot_address, 'lat': depot['point']['lat'], 'lon': depot['point']['lon'] } if 'service_duration_s' in depot: data['service_duration_s'] = depot['service_duration_s'] if 'time_window' in depot: data['time_interval'] = depot['time_window'] j = post_request('depots-batch', [data]) assert j['inserted'] + j['updated'] == 1 logging.info('Depot uploaded') def upload_couriers(couriers): j = post_request('couriers-batch', [{'number': c.get('ref', str(c['id'])), 'name': c['ref']} for c in couriers]) assert j['inserted'] + j['updated'] == len(couriers) logging.info('{} couriers uploaded'.format(len(couriers))) def upload_routes(solution, depot): veh_dict = { v['id']: v for v in solution['vehicles'] } j = post_request('routes-batch', [ { 'number': '{}-{}-{}'.format(veh_dict[r['vehicle_id']]['ref'], r['shift']['id'], solution['options']['date']), 'date': solution['options']['date'], 'depot_number': depot.get('ref', str(depot['id'])), 'courier_number': veh_dict[r['vehicle_id']].get('ref', str(r['vehicle_id'])) } for r in solution['routes'] ]) assert j['inserted'] + j['updated'] == len(solution['routes']) logging.info('{} routes uploaded for date {}'.format(len(solution['routes']), solution['options']['date'])) def upload_orders(locations, solution, orders_dict, phone): loc2veh = {} loc2shift = {} loc2multi = {} for route in solution['routes']: for loc in route['route']: if loc['node']['type'] == 'location': loc2veh[loc['node']['value']['id']] = route['vehicle_id'] loc2shift[loc['node']['value']['id']] = route['shift']['id'] loc2multi[loc['node']['value']['id']] = loc['multi_order'] veh_dict = { v['id']: v for v in solution['vehicles'] } dropped_locations = {l['id']: l for l in solution['dropped_locations']} dropped_count = 0 skipped_count = 0 data = [] for l in locations: if l['id'] in loc2veh and not l['ref'].startswith('respawn_') and not l['ref'].startswith('FAKE'): data.append({ 'number': l['ref'], 'lat': l['point']['lat'], 'lon': l['point']['lon'], 'address': l.get('description', '-'), 'phone': orders_dict[l['ref']].get('customer_phone', phone), 'service_duration_s': 0 if loc2multi[l['id']] else l.get('service_duration_s', 0) + l.get('shared_service_duration_s', 0), 'status': 'confirmed', 'customer_name': l.get('title', '-'), 'weight': orders_dict[l['ref']].get('weight_kg', 0), 'time_interval': l['time_window'], 'route_number': '{}-{}-{}'.format(veh_dict[loc2veh[l['id']]]['ref'], loc2shift[l['id']], solution['options']['date']) }) elif l['id'] in dropped_locations: dropped_count += 1 logging.error('Order {} is dropped. 
Skipping it.'.format(dropped_locations[l['id']]['ref'])) else: skipped_count += 1 logging.error('Order {} was skipped.'.format(l['ref'])) j = post_request('orders-batch', data) assert j['inserted'] + j['updated'] == len( locations) - dropped_count - skipped_count, 'Requested: {}, Updated: {}, Inserted: {}'.format(len(locations), j['updated'], j['inserted']) logging.info('{} locations uploaded'.format(len(locations))) def fix_orders(solution): veh_dict = { v['id']: v for v in solution['vehicles'] } routes_dict = get_routes(solution['options']['date']) for r in solution['routes']: route_number = '{}-{}-{}'.format(veh_dict[r['vehicle_id']]['ref'], r['shift']['id'], solution['options']['date']) route_id = routes_dict[route_number]['id'] new_orders = [ x['node']['value']['ref'] for x in r['route'] if x['node']['type'] == 'location' and not x['node']['value']['ref'].startswith('respawn_') and not x['node']['value']['ref'].startswith('FAKE') ] logging.info(','.join(new_orders)) logging.info('Got {} new orders in route {}'.format(len(new_orders), route_number)) j = get_request( url='orders?route_id={}'.format(route_id) ) order_numbers_dict = {str(o['id']): o['number'] for o in j} # logging.info([order_numbers_dict[str(x['id'])] for x in j]) # logging.info(new_orders) old_orders = [ order_numbers_dict[str(x['id'])] for x in j if order_numbers_dict[str(x['id'])] not in new_orders ] logging.info(','.join(old_orders)) logging.info('Found {} old orders in route {}'.format(len(old_orders), route_number)) try: delete_request( url='routes/{}/fix-orders'.format(route_id) ) logging.info('Route {} was cleared from fixed orders'.format(route_number)) except requests.HTTPError as e: if e.response.status_code in (422, 500): continue else: raise post_request( url='routes/{}/fix-orders'.format(route_id), data={'orders': old_orders + new_orders} ) logging.info('Route {} with {}+{} orders was fixed'.format(route_number, len(old_orders), len(new_orders))) def clear_fixed_orders(solution): veh_dict = { v['id']: v for v in solution['vehicles'] } routes_dict = get_routes(solution['options']['date']) for r in solution['routes']: route_number = '{}-{}'.format(veh_dict[r['vehicle_id']]['ref'], solution['options']['date']) if route_number in routes_dict: route_id = routes_dict[route_number]['id'] try: delete_request( url='routes/{}/fix-orders'.format(route_id) ) logging.info('Route {} was cleared from fixed orders'.format(route_number)) except requests.HTTPError as e: if e.response.status_code in (422, 500): continue else: raise def delete_route_and_orders(args): route_number, route = args orders_dict = get_orders(route['id']) for order_number, order in orders_dict.items(): delete_request( url='orders/{}'.format(order['id']) ) logging.info('{} orders from route {} were deleted'.format(len(orders_dict), route_number)) delete_request( url='routes/{}'.format(route['id']) ) logging.info('Route {} was deleted'.format(route_number)) def delete_routes_and_orders(date): routes_dict = get_routes(date) logging.info('Found {} routes for date {}'.format(len(routes_dict), date)) with Pool(10) as p: p.map(delete_route_and_orders, routes_dict.items()) logging.info('DATA DELETED SUCCESSFULLY') def assert_solution(s): couriers_nums = [v['ref'] for v in s['vehicles']] duplicate_vehicle_nums = get_duplicates(couriers_nums) if duplicate_vehicle_nums: logging.error('Duplicate vehicles found: {}'.format(duplicate_vehicle_nums)) for v in s['vehicles']: if v['ref'] == duplicate_vehicle_nums[0]: logging.error(json.dumps(v, indent=4)) def 
assert_request(r): orders_nums = [l['ref'] for l in r['locations']] duplicate_loc_nums = get_duplicates(orders_nums) if duplicate_loc_nums: logging.error('Duplicate locations found: {}'.format(duplicate_loc_nums)) for l in r['locations']: if l['ref'] == duplicate_loc_nums[0]: logging.error(json.dumps(l, indent=4)) def upload_data(solver_request, solver_solution, orders_dict, depot_address, date=None, phone='+71111111111'): r = solver_request s = solver_solution assert_request(r) assert_solution(s) if date: s['options']['date'] = date r['options']['date'] = date logging.info('Uploading data for date: {}'.format(s['options']['date'])) upload_depot(r['depot'], depot_address) upload_couriers([v for v in r['vehicles'] if v['id'] in [r['vehicle_id'] for r in s['routes']]]) upload_routes(s, r['depot']) for chunk in chunks(r['locations'], 500): upload_orders(chunk, s, orders_dict, phone) fix_orders(s) logging.info('DATA UPLOADED SUCCESSFULLY') def parse_args(): parser = argparse.ArgumentParser(usage=usage()) parser.add_argument('--task-id', required=True, help='Your MVRP task ID to upload to Ya.Courier') parser.add_argument('--date', type=valid_date, help='Upload data to this date') parser.add_argument('--clear', action='store_true', help='Clear ALL data for this date') return parser.parse_args() def usage(): return '\n\tYA_COURIER_TOKEN=<YA.COURIER TOKEN> YA_COURIER_COMPANY_ID=<YOUR COMPANY ID> ' + \ 'ya-courier-uploader --task-id <YOUR MVRP API TASK ID>\n\n' + \ 'This tool uploads MVRP solution to Ya.Courier Monitoring.\n\n' + \ 'For MVRP API documentation visit https://courier.yandex.ru/vrs/api/v1/doc\n' + \ 'For Ya.Courier API documentation visit https://courier.yandex.ru/api/v1/doc\n\n' def main(): args = parse_args() if args.clear: delete_routes_and_orders(args.date) else: req = get_mvrp_request(args.task_id) resp = get_mvrp_solution(args.task_id) for loc in req['locations']: if not loc.get('ref'): loc['ref'] = str(loc['id']) for route in resp['result']['routes']: for node in route['route']: if not node['node']['value'].get('ref'): node['node']['value']['ref'] = str(node['node']['value']['id']) assert len({loc['ref'] for loc in req['locations']}) == \ len({loc['ref'][:MAX_REF_LEN] for loc in req['locations']}), \ "Location ref length should be shorter than 80 characters" for loc in req['locations']: loc['ref'] = loc['ref'][:MAX_REF_LEN] for route in resp['result']['routes']: for node in route['route']: node['node']['value']['ref'] = node['node']['value']['ref'][:MAX_REF_LEN] upload_data( req, resp['result'], defaultdict(dict), req['depot'].get('ref', 'Склад'), date=args.date ) if __name__ == '__main__': main()
roschupkin/ya.courier.helpers
ya_courier_helpers/mvrp_solution_uploader.py
Python
apache-2.0
11,903
[ "VisIt" ]
454709c43e898aca453ffff2004b2b63f55a211bdb24bd5d48480d5e0cc44288
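The uploader is normally driven from the command line; below is a sketch of both the CLI form documented by usage() and the equivalent programmatic call. The task id, date, and credentials are placeholders:

# Shell form (credentials via environment, as usage() documents):
#   YA_COURIER_TOKEN=... YA_COURIER_COMPANY_ID=... \
#       ya-courier-uploader --task-id <TASK_ID> --date 2019-09-01

from collections import defaultdict
from ya_courier_helpers.util import get_mvrp_request, get_mvrp_solution
from ya_courier_helpers.mvrp_solution_uploader import upload_data

task_id = 'example-task-id'  # placeholder MVRP API task id
req = get_mvrp_request(task_id)
solution = get_mvrp_solution(task_id)['result']
upload_data(req, solution, defaultdict(dict),
            req['depot'].get('ref', 'Depot'), date='2019-09-01')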
""" pyNEAT Copyright (C) 2007-2008 Brian Greer This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ from Synapse import * class Gene: def __init__(self, input=None, output=None, weight=0.0, recurrent=False, trait=None, enabled=True, mutation=0.0, innovation=0.0): if input is None or output is None: raise ValueError self.input = input self.output = output self.synapse = Synapse(input, output, weight, recurrent, trait) self.enabled = enabled self.mutation = mutation self.innovation = innovation
liquidkarma/pyneat
pyNEAT/Gene.py
Python
gpl-2.0
1,206
[ "Brian" ]
cc6e4dbdd51c4e0b73cb04aac96afb70378737e6b24b2cef974db36d9447ded4
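A construction sketch for the Gene class above; the node objects are bare stand-ins, since Gene only stores the references it is given and wraps them in a Synapse:

from pyNEAT.Gene import Gene

class _Node(object):  # illustrative placeholder for a pyNEAT network node
    def __init__(self, node_id):
        self.id = node_id

gene = Gene(input=_Node(1), output=_Node(2),
            weight=0.5, recurrent=False,
            enabled=True, innovation=7)
print(gene.enabled, gene.innovation)  # -> True 7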
from brian import *

if __name__ == '__main__':
    savelocation = '/home/achilleas/Documents/Uni/working_dir/simout.py/tmp/'
    input_freqs = arange(300, 400, 10)
    n_inputs = 50
    numsims = len(input_freqs)

    duration = 2000*ms
    V_reset = 13.65*mV
    V_th = 15*mV
    V_rest = 0*mV
    V2_rest = 0*mV
    capa = 1*ufarad
    capa2 = 1*ufarad
    resi = 10*kohm
    resi2 = 10*kohm
    junc_res = 1*kohm
    # Membrane and dendritic time constants follow from R and C
    # (both are 10 ms with the values above).
    tau_den = capa2*resi2
    tau_mem = capa*resi
    refr = 2*ms
    eqs = Equations('''
    dV2/dt = (-V2+V2_rest)/tau_den + (V-V2)/(junc_res*capa2) : volt
    dV/dt = (-V+V_rest)/tau_mem + (V2 - V)/(junc_res*capa) : volt
    ''')

    DV_s = 0.16*mV
    nrns = NeuronGroup(numsims, eqs, reset=V_reset, threshold='V>V_th', refractory=refr)
    generator_rates = array([])
    for grate in input_freqs:
        generator_rates = append(generator_rates, ones(n_inputs)*grate)
    inp = PoissonGroup(n_inputs*numsims, generator_rates)
    con_matrix = zeros([n_inputs*numsims, numsims])
    for nrn in range(numsims):
        con_matrix[nrn*n_inputs:(nrn+1)*n_inputs, nrn] = ones(n_inputs)*DV_s
    cons = Connection(source=inp, target=nrns, state='V2', weight=con_matrix)
    out_mon = SpikeMonitor(nrns)
    count_mon = SpikeCounter(nrns)
    mem_mon = StateMonitor(nrns, 'V', record=True)
    den_mon = StateMonitor(nrns, 'V2', record=True)
    print "Running", numsims, "simulations ..."
    run(duration, report='stdout')
    print "Simulations DONE!"

    # for nrn in range(numsims):
    #     clf()
    #     figure(figsize=(15,12))
    #     hold(True)
    #     plot(den_mon.times,den_mon[nrn]/mV,color=(0,0,1,0.3))
    #     plot(mem_mon.times,mem_mon[nrn]/mV,color=(1,0,0,1))
    #     xlabel('Time (s)')
    #     ylabel('Potential (mV)')
    #     legend(('Dendritic potential','Somatic potential'))
    #     title('''
    #     V_reset: %s, V_th: %s, V_rest: %s, V2_rest: %s, tau_mem: %s,
    #     tau_den: %s, C_soma: %s, C_den: %s. R_soma: %s, R_den: %s,
    #     f_in: %s Hz, f_out: %s Hz
    #     ''' % (V_reset, V_th, V_rest, V2_rest, tau_mem, tau_den, capa,\
    #             capa2, resi, resi2, input_freqs[nrn]*Hz, count_mon.count[nrn]/duration))
    #     figname = '%sinp%sHz.png' % (savelocation, input_freqs[nrn])
    #     savefig(figname)
    #     print 'Saved %s' % figname

    print "Done. Preparing plots ..."
    f_in = input_freqs
    numspikes = zeros(numsims)
    mean_isi = zeros(numsims)
    std_isi = zeros(numsims)
    for nrn in out_mon.spiketimes.iterkeys():
        nrnspikes = out_mon.spiketimes[nrn]
        numspikes[nrn] = len(nrnspikes)
        # At least three spikes, i.e. two intervals, are required for
        # meaningful ISI statistics.
        if numspikes[nrn] > 2:
            isi = diff(nrnspikes)
            mean_isi[nrn] = mean(isi)
            std_isi[nrn] = std(isi)

    f_out = numspikes/duration
    cv = std_isi/mean_isi
    variability = array([mean_isi, cv])
    transfer = array([f_in, f_out])
    subplot(2, 1, 1)
    hold(True)
    plot(transfer[0, :], transfer[1, :], '.')
    xlabel('f_in (Hz)')
    ylabel('f_out (Hz)')
    hold(False)
    subplot(2, 1, 2)
    hold(True)
    t = arange(0.002, max(mean_isi), 0.0001)
    theo_cv = sqrt((t-0.002)/t)
    plot(t, theo_cv)  # theoretical CV curve for a 2 ms refractory period
    plot(variability[0, :], variability[1, :], '.')
    xlabel('mean ISI')
    ylabel('CV')
    show()
achilleas-k/brian-scripts
twp_comp_transfer.py
Python
apache-2.0
3,358
[ "Brian" ]
440580530cbf56b42d8cde3da5444e56c5d34edbd2d0f89c8e76fcdaf2e439aa
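The ISI and CV statistics at the end of the script reduce to a few lines of NumPy; here is a standalone sketch with synthetic spike times (the values are illustrative):

import numpy as np

spiketimes = np.array([0.010, 0.032, 0.051, 0.078, 0.095])  # seconds
isi = np.diff(spiketimes)        # inter-spike intervals
cv = isi.std() / isi.mean()      # ~1 for Poisson-like firing, ~0 for a clock
print('mean ISI: %.4f s, CV: %.3f' % (isi.mean(), cv))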
# # Copyright (C) 2010-2017 Samuel Abels # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ Encryption related utilities. """ from __future__ import print_function, absolute_import from builtins import chr from builtins import ord from builtins import range from builtins import bytes import sys import string import random try: from Cryptodome.Hash import MD4 except ImportError: from Crypto.Hash import MD4 _VALIDSEEDCHARACTERS = string.ascii_letters + string.digits _DICTIONARY = [ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD", "AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA", "AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK", "ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE", "AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM", "BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET", "BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO", "BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT", "BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT", "CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY", "CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN", "DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG", "DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB", "DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO", "ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE", "EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW", "FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR", "FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP", "GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO", "GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD", "HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM", "HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT", "HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE", "HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL", "INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT", "ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET", "JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT", "KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB", "LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE", "LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", 
"LIN", "LIP", "LIT", "LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG", "LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW", "MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT", "MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG", "MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED", "NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD", "NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF", "OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL", "OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT", "OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD", "PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG", "PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT", "PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB", "PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT", "RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM", "RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB", "RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM", "SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET", "SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY", "SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY", "SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN", "TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE", "TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP", "TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP", "US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS", "WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT", "WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE", "YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT", "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS", "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE", "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA", "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN", "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW", "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA", "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM", "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW", "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL", "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM", "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK", "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH", "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT", "BEAU", "BECK", "BEEF", "BEEN", "BEER", "BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN", "BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE", "BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE", "BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", "BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", "BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", "BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", "BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", "BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", "BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", "BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", "BURL", "BURN", "BURR", 
"BURT", "BURY", "BUSH", "BUSS", "BUST", "BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", "CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL", "CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", "CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", "CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", "CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", "CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA", "COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN", "COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK", "COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST", "COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", "CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", "CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", "DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN", "DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", "DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", "DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", "DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT", "DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", "DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", "DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", "DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", "DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", "DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", "EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", "EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", "EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", "FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", "FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", "FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST", "FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", "FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", "FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", "FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", "FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", "FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", "FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", "FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", "FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", "GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH", "GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", "GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", "GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", "GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", "GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", "GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", "GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", "GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", "GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", "HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK", "HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE", "HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", "HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", "HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", "HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", 
"HINT", "HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", "HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", "HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", "HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", "HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", "HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH", "INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", "ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", "JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", "JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", "JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", "JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", "JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", "KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", "KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", "KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", "KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", "LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", "LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", "LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", "LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST", "LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", "LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", "LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST", "LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", "LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD", "LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", "LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", "LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", "MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", "MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK", "MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE", "MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", "MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", "MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", "MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", "MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", "MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", "MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", "MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", "MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", "NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", "NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", "NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", "NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", "NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", "OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", "OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", "ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", "OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", "OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", "RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", "RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", "RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA", "REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", "RILL", "RIME", "RING", 
"RINK", "RISE", "RISK", "RITE", "ROAD", "ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", "ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", "ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", "RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", "RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", "SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE", "SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", "SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", "SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS", "SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", "SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", "SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", "SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", "SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", "SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", "SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", "SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", "SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", "SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", "STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", "SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", "SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", "TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", "TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", "TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS", "TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN", "THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", "TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY", "TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", "TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", "TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", "TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", "TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", "TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", "USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", "VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", "VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", "WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", "WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM", "WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", "WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", "WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", "WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", "WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE", "WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", "WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", "YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", "YELL", "YOGA", "YOKE"] def _checksummed_long(key): thesum, k = 0, key for i in range(0, 32): thesum = thesum + (k % 4) k = k >> 2 return (key << 2) | (thesum % 4) def _sixword_from_long(key): key = _checksummed_long(key) words = [] for i in range(0, 6): words = [_DICTIONARY[key % 2048]] + words key = key >> 11 return ' '.join(words) def _long_from_raw(thehash): """Fold to a long, a digest supplied as a string.""" hashnum = 0 for h in thehash: hashnum <<= 8 hashnum |= ord(bytes([h])) return hashnum 
def _sixword_from_raw(key):
    return _sixword_from_long(_long_from_raw(key))


if sys.version_info[0] < 3:
    def _fold_md4_or_md5(digest):
        if len(digest) < 16:
            raise ValueError('digest is too short')
        result = b''
        for i in range(0, 8):
            one = ord(bytes(digest[i]))
            two = ord(bytes(digest[i + 8]))
            result = result + bytes([one ^ two])
        return result
else:
    def _fold_md4_or_md5(digest):
        if len(digest) < 16:
            raise ValueError('digest is too short')
        result = b''
        for i in range(0, 8):
            one = ord(bytes([digest[i]]))
            two = ord(bytes([digest[i + 8]]))
            result = result + bytes([one ^ two])
        return result


def otp(password, seed, sequence):
    """
    Calculates a one-time password hash using the given password, seed, and
    sequence number and returns it.
    Uses the MD4/sixword algorithm as supported by TACACS+ servers.

    :type  password: str
    :param password: A password.
    :type  seed: str
    :param seed: A cryptographic seed.
    :type  sequence: int
    :param sequence: A sequence number.
    :rtype:  string
    :return: A hash.
    """
    if len(password) not in list(range(4, 64)):
        raise ValueError('passphrase length')
    if len(seed) not in list(range(1, 17)):
        raise ValueError('seed length')
    for x in seed:
        if x not in _VALIDSEEDCHARACTERS:
            raise ValueError('seed composition')
    if sequence < 0:
        raise ValueError('sequence')

    # Pycryptodome only supports byte strings.
    seed = seed.encode('utf-8')
    password = password.encode('utf-8')

    # Discard the first <sequence> keys
    thehash = MD4.new(seed + password).digest()
    thehash = _fold_md4_or_md5(thehash)
    for i in range(0, sequence):
        thehash = _fold_md4_or_md5(MD4.new(thehash).digest())

    # Generate the result
    return _sixword_from_raw(thehash)
maximumG/exscript
Exscript/util/crypt.py
Python
mit
22,207
[ "Elk", "MOE" ]
1d7433287005d99d2e51bf901d37ad99eb5b6acc739dd03d7c2fe18aa58eeb5c
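A usage sketch for otp() above; the passphrase, seed, and sequence number are illustrative, and the returned six-word response depends on all three:

from Exscript.util.crypt import otp

# Respond to an S/Key-style challenge such as 'otp-md4 98 host12345':
response = otp('correct horse battery', 'host12345', 98)
print(response)  # six uppercase dictionary words, e.g. 'OAK ... BELL'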
# -*- coding: utf-8 -*- # # hl_api_types.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Classes defining the different PyNEST types """ from ..ll_api import * from .. import pynestkernel as kernel from .hl_api_helper import * from .hl_api_simulation import GetKernelStatus import numpy import json from math import floor, log try: import pandas HAVE_PANDAS = True except ImportError: HAVE_PANDAS = False __all__ = [ 'CollocatedSynapses', 'CreateParameter', 'Mask', 'NodeCollection', 'Parameter', 'serializable', 'SynapseCollection', 'to_json', ] def CreateParameter(parametertype, specs): """ Create a parameter. Parameters ---------- parametertype : string Parameter type with or without distance dependency. Can be one of the following: 'constant', 'linear', 'exponential', 'gaussian', 'gaussian2D', 'uniform', 'normal', 'lognormal', 'distance', 'position' specs : dict Dictionary specifying the parameters of the provided `parametertype`, see **Parameter types**. Returns ------- ``Parameter``: Object representing the parameter Notes ----- - Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for instance :py:func:`.uniform`. **Parameter types** Some available parameter types (`parametertype` parameter), their function and acceptable keys for their corresponding specification dictionaries * Constant :: 'constant' : {'value' : float} # constant value * Randomization :: # random parameter with uniform distribution in [min,max) 'uniform' : {'min' : float, # minimum value, default: 0.0 'max' : float} # maximum value, default: 1.0 # random parameter with normal distribution, optionally truncated # to [min,max) 'normal': {'mean' : float, # mean value, default: 0.0 'sigma': float, # standard deviation, default: 1.0 'min' : float, # minimum value, default: -inf 'max' : float} # maximum value, default: +inf # random parameter with lognormal distribution, # optionally truncated to [min,max) 'lognormal' : {'mu' : float, # mean value of logarithm, default: 0.0 'sigma': float, # standard deviation of log, default: 1.0 'min' : float, # minimum value, default: -inf 'max' : float} # maximum value, default: +inf """ return sli_func('CreateParameter', {parametertype: specs}) class NodeCollectionIterator(object): """ Iterator class for `NodeCollection`. Returns ------- `NodeCollection`: Single node ID `NodeCollection` of respective iteration. """ def __init__(self, nc): self._nc = nc self._increment = 0 def __iter__(self): return self def __next__(self): if self._increment > len(self._nc) - 1: raise StopIteration val = sli_func('Take', self._nc._datum, [self._increment + (self._increment >= 0)]) self._increment += 1 return val class NodeCollection(object): """ Class for `NodeCollection`. `NodeCollection` represents the nodes of a network. 
    The class supports iteration, concatenation, indexing, slicing, membership,
    length, conversion to and from lists, and test for equality. By using the
    membership functions :py:func:`get()` and :py:func:`set()`, you can get and
    set desired parameters.

    A `NodeCollection` is created by the :py:func:`.Create` function, or by
    converting a list of nodes to a `NodeCollection` with
    ``nest.NodeCollection(list)``.

    If your nodes have spatial extent, use the member parameter ``spatial`` to
    get the spatial information.

    Example
    -------
        ::

            import nest

            nest.ResetKernel()

            # Create NodeCollection representing nodes
            nc = nest.Create('iaf_psc_alpha', 10)

            # Convert from list
            node_ids_in = [2, 4, 6, 8]
            new_nc = nest.NodeCollection(node_ids_in)

            # Convert to list
            nc_list = nc.tolist()

            # Concatenation
            Enrns = nest.Create('aeif_cond_alpha', 600)
            Inrns = nest.Create('iaf_psc_alpha', 400)
            nrns = Enrns + Inrns

            # Slicing and membership
            print(new_nc[2])
            print(new_nc[1:2])
            6 in new_nc
    """

    _datum = None

    def __init__(self, data=None):
        if data is None:
            data = []
        if isinstance(data, kernel.SLIDatum):
            if data.dtype != "nodecollectiontype":
                raise TypeError("Need NodeCollection Datum.")
            self._datum = data
        else:
            # Data from user, must be converted to datum.
            # Data can be anything that can be converted to a NodeCollection,
            # such as list, tuple, etc.
            nc = sli_func('cvnodecollection', data)
            self._datum = nc._datum

    def __iter__(self):
        return NodeCollectionIterator(self)

    def __add__(self, other):
        if not isinstance(other, NodeCollection):
            raise NotImplementedError()

        return sli_func('join', self._datum, other._datum)

    def __getitem__(self, key):
        if isinstance(key, slice):
            if key.start is None:
                start = 1
            else:
                start = key.start + 1 if key.start >= 0 else max(key.start, -1 * self.__len__())
                if start > self.__len__():
                    raise IndexError('slice start value outside of the NodeCollection')
            if key.stop is None:
                stop = self.__len__()
            else:
                stop = min(key.stop, self.__len__()) if key.stop >= 0 else key.stop - 1
                if abs(stop) > self.__len__():
                    raise IndexError('slice stop value outside of the NodeCollection')
            step = 1 if key.step is None else key.step
            if step < 1:
                raise IndexError('slicing step for NodeCollection must be strictly positive')

            return sli_func('Take', self._datum, [start, stop, step])
        elif isinstance(key, (int, numpy.integer)):
            if abs(key + (key >= 0)) > self.__len__():
                raise IndexError('index value outside of the NodeCollection')
            return sli_func('Take', self._datum, [key + (key >= 0)])
        elif isinstance(key, (list, tuple)):
            if len(key) == 0:
                return NodeCollection([])
            # Must check if elements are bool first, because bool inherits from int
            if all(isinstance(x, bool) for x in key):
                if len(key) != len(self):
                    raise IndexError('Bool index array must be the same length as NodeCollection')
                np_key = numpy.array(key, dtype=bool)
            # Checking that elements are not instances of bool too, because bool inherits from int
            elif all(isinstance(x, int) and not isinstance(x, bool) for x in key):
                np_key = numpy.array(key, dtype=numpy.uint64)
                if len(numpy.unique(np_key)) != len(np_key):
                    raise ValueError('All node IDs in a NodeCollection have to be unique')
            else:
                raise TypeError('Indices must be integers or bools')
            return take_array_index(self._datum, np_key)
        elif isinstance(key, numpy.ndarray):
            if len(key) == 0:
                return NodeCollection([])
            if len(key.shape) != 1:
                raise TypeError('NumPy indices must be one-dimensional')
            is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type)
            if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)):
                raise TypeError('NumPy indices must be an array of integers or bools')
            if is_booltype and len(key) != len(self):
                raise IndexError('Bool index array must be the same length as NodeCollection')
            if not is_booltype and len(numpy.unique(key)) != len(key):
                raise ValueError('All node IDs in a NodeCollection have to be unique')
            return take_array_index(self._datum, key)
        else:
            raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices')

    def __contains__(self, node_id):
        return sli_func('MemberQ', self._datum, node_id)

    def __eq__(self, other):
        if not isinstance(other, NodeCollection):
            raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))

        if self.__len__() != other.__len__():
            return False

        return sli_func('eq', self, other)

    def __ne__(self, other):
        if not isinstance(other, NodeCollection):
            raise NotImplementedError()

        return not self == other

    def __len__(self):
        return sli_func('size', self._datum)

    def __str__(self):
        return sli_func('pcvs', self._datum)

    def __repr__(self):
        return sli_func('pcvs', self._datum)

    def get(self, *params, **kwargs):
        """
        Get parameters from nodes.

        Parameters
        ----------
        params : str or list, optional
            Parameters to get from the nodes. It must be one of the
            following:

            - A single string.
            - A list of strings.
            - One or more strings, followed by a string or list of strings.
              This is for hierarchical addressing.
        output : str, ['pandas','json'], optional
            If the returned data should be in a Pandas DataFrame or in a
            JSON serializable format.

        Returns
        -------
        int or float:
            If there is a single node in the `NodeCollection`, and a single
            parameter in params.
        array_like:
            If there are multiple nodes in the `NodeCollection`, and a single
            parameter in params.
        dict:
            If there are multiple parameters in params. Or, if no parameters
            are specified, a dictionary containing aggregated
            parameter-values for all nodes is returned.
        DataFrame:
            Pandas Data frame if output should be in pandas format.

        Raises
        ------
        TypeError
            If the input params are of the wrong form.
        KeyError
            If the specified parameter does not exist for the nodes.

        See Also
        --------
        :py:func:`set`,
        :py:func:`GetStatus()<nest.lib.hl_api_info.GetStatus>`,
        :py:func:`SetStatus()<nest.lib.hl_api_info.SetStatus>`

        Examples
        --------

        >>> nodes.get()
            {'archiver_length': (0, 0, 0),
             'beta_Ca': (0.001, 0.001, 0.001),
             'C_m': (250.0, 250.0, 250.0),
             ...
'V_th': (-55.0, -55.0, -55.0), 'vp': (0, 0, 0)} >>> nodes.get('V_m') (-70.0, -70.0, -70.0) >>> nodes[0].get('V_m') -70.0 >>> nodes.get('V_m', 'C_m') {'V_m': (-70.0, -70.0, -70.0), 'C_m': (250.0, 250.0, 250.0)} >>> voltmeter.get('events', 'senders') array([...], dtype=int64) """ if not self: raise ValueError('Cannot get parameter of empty NodeCollection') # ------------------------- # # Checks of input # # ------------------------- # if not kwargs: output = '' elif 'output' in kwargs: output = kwargs['output'] if output == 'pandas' and not HAVE_PANDAS: raise ImportError('Pandas could not be imported') else: raise TypeError('Got unexpected keyword argument') pandas_output = output == 'pandas' if len(params) == 0: # get() is called without arguments result = sli_func('get', self._datum) elif len(params) == 1: # params is a tuple with a string or list of strings result = get_parameters(self, params[0]) else: # Hierarchical addressing result = get_parameters_hierarchical_addressing(self, params) if pandas_output: index = self.get('global_id') if len(params) == 1 and is_literal(params[0]): # params is a string result = {params[0]: result} elif len(params) > 1 and is_literal(params[1]): # hierarchical, single string result = {params[1]: result} if len(self) == 1: index = [index] result = {key: [val] for key, val in result.items()} result = pandas.DataFrame(result, index=index) elif output == 'json': result = to_json(result) return result def set(self, params=None, **kwargs): """ Set the parameters of nodes to params. NB! This is almost the same implementation as `SetStatus`. If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values can be single values or list of the same size as the `NodeCollection`. Parameters ---------- params : str or dict or list Dictionary of parameters (either lists or single values) or list of dictionaries of parameters of same length as the `NodeCollection`. kwargs : keyword argument pairs Named arguments of parameters of the elements in the `NodeCollection`. Raises ------ TypeError If the input params are of the wrong form. KeyError If the specified parameter does not exist for the nodes. See Also -------- :py:func:`get`, :py:func:`SetStatus()<nest.lib.hl_api_info.SetStatus>`, :py:func:`GetStatus()<nest.lib.hl_api_info.GetStatus>` """ if not self: return if kwargs and params is None: params = kwargs elif kwargs and params: raise TypeError("must either provide params or kwargs, but not both.") local_nodes = [self.local] if len(self) == 1 else self.local if isinstance(params, dict) and all(local_nodes): node_params = self[0].get() contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for key, vals in params.items()] if any(contains_list): temp_param = [{} for _ in range(self.__len__())] for key, vals in params.items(): if not is_iterable(vals): for temp_dict in temp_param: temp_dict[key] = vals else: for i, temp_dict in enumerate(temp_param): temp_dict[key] = vals[i] params = temp_param if (isinstance(params, (list, tuple)) and self.__len__() != len(params)): raise TypeError("status dict must be a dict, or a list of dicts of length {} ".format(self.__len__())) sli_func('SetStatus', self._datum, params) def tolist(self): """ Convert `NodeCollection` to list. """ if self.__len__() == 0: return [] return (list(self.get('global_id')) if len(self) > 1 else [self.get('global_id')]) def index(self, node_id): """ Find the index of a node ID in the `NodeCollection`. 
Parameters ---------- node_id : int Global ID to be found. Raises ------ ValueError If the node ID is not in the `NodeCollection`. """ index = sli_func('Find', self._datum, node_id) if index == -1: raise ValueError('{} is not in NodeCollection'.format(node_id)) return index def __bool__(self): """Converts the NodeCollection to a bool. False if it is empty, True otherwise.""" return len(self) > 0 def __array__(self, dtype=None): """Convert the NodeCollection to a NumPy array.""" return numpy.array(self.tolist(), dtype=dtype) def __getattr__(self, attr): if not self: raise AttributeError('Cannot get attribute of empty NodeCollection') if attr == 'spatial': metadata = sli_func('GetMetadata', self._datum) val = metadata if metadata else None super().__setattr__(attr, val) return self.spatial # NumPy compatibility check: # raises AttributeError to tell NumPy that interfaces other than # __array__ are not available (otherwise get_parameters would be # queried, KeyError would be raised, and all would crash) if attr.startswith('__array_'): raise AttributeError return self.get(attr) def __setattr__(self, attr, value): # `_datum` is the only property of NodeCollection that should not be # interpreted as a property of the model if attr == '_datum': super().__setattr__(attr, value) else: self.set({attr: value}) class SynapseCollectionIterator(object): """ Iterator class for SynapseCollection. """ def __init__(self, synapse_collection): self._iter = iter(synapse_collection._datum) def __iter__(self): return self def __next__(self): return SynapseCollection(next(self._iter)) class SynapseCollection(object): """ Class for Connections. `SynapseCollection` represents the connections of a network. The class supports indexing, iteration, length and equality. You can get and set connection parameters by using the membership functions :py:func:`get()` and :py:func:`set()`. By using the membership function :py:func:`sources()` you get an iterator over source nodes, while :py:func:`targets()` returns an interator over the target nodes of the connections. A SynapseCollection is created by the :py:func:`.GetConnections` function. """ _datum = None def __init__(self, data): if isinstance(data, list): for datum in data: if (not isinstance(datum, kernel.SLIDatum) or datum.dtype != "connectiontype"): raise TypeError("Expected Connection Datum.") self._datum = data elif data is None: # We can have an empty SynapseCollection if there are no connections. self._datum = data else: if (not isinstance(data, kernel.SLIDatum) or data.dtype != "connectiontype"): raise TypeError("Expected Connection Datum.") # self._datum needs to be a list of Connection datums. 
self._datum = [data] self.print_full = False def __iter__(self): return SynapseCollectionIterator(self) def __len__(self): if self._datum is None: return 0 return len(self._datum) def __eq__(self, other): if not isinstance(other, SynapseCollection): raise NotImplementedError() if self.__len__() != other.__len__(): return False self_get = self.get(['source', 'target', 'target_thread', 'synapse_id', 'port']) other_get = other.get(['source', 'target', 'target_thread', 'synapse_id', 'port']) if self_get != other_get: return False return True def __neq__(self, other): if not isinstance(other, SynapseCollection): raise NotImplementedError() return not self == other def __getitem__(self, key): if isinstance(key, slice): return SynapseCollection(self._datum[key]) else: return SynapseCollection([self._datum[key]]) def __str__(self): """ Printing a `SynapseCollection` returns something of the form: source target synapse model weight delay -------- -------- --------------- -------- ------- 1 4 static_synapse 1.000 1.000 2 4 static_synapse 2.000 1.000 1 3 stdp_synapse 4.000 1.000 1 4 stdp_synapse 3.000 1.000 2 3 stdp_synapse 3.000 1.000 2 4 stdp_synapse 2.000 1.000 If your SynapseCollection has more than 36 elements, only the first and last 15 connections are printed. To display all, first set `print_full = True`. :: conns = nest.GetConnections() conns.print_full = True print(conns) """ def format_row_(s, t, sm, w, dly): try: return f'{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}' except ValueError: # Used when we have many connections and print_full=False return f'{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}' MAX_SIZE_FULL_PRINT = 35 # 35 is arbitrarily chosen. params = self.get() srcs = params['source'] trgt = params['target'] wght = params['weight'] dlay = params['delay'] s_model = params['synapse_model'] if isinstance(srcs, int): srcs = [srcs] trgt = [trgt] wght = [wght] dlay = [dlay] s_model = [s_model] src_h = 'source' trg_h = 'target' sm_h = 'synapse model' w_h = 'weight' d_h = 'delay' # Find maximum number of characters for each column, used to determine width of column src_len = max(len(src_h) + 2, floor(log(max(srcs), 10))) trg_len = max(len(trg_h) + 2, floor(log(max(trgt), 10))) sm_len = max(len(sm_h) + 2, len(max(s_model, key=len))) w_len = len(w_h) + 2 d_len = len(d_h) + 2 # 35 is arbitrarily chosen. 
if len(srcs) >= MAX_SIZE_FULL_PRINT and not self.print_full: # u'\u22EE ' is the unicode for vertical ellipsis, used when we have many connections srcs = srcs[:15] + [u'\u22EE '] + srcs[-15:] trgt = trgt[:15] + [u'\u22EE '] + trgt[-15:] wght = wght[:15] + [u'\u22EE '] + wght[-15:] dlay = dlay[:15] + [u'\u22EE '] + dlay[-15:] s_model = s_model[:15] + [u'\u22EE '] + s_model[-15:] headers = f'{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}' + '\n' boarders = '-'*src_len + ' ' + '-'*trg_len + ' ' + '-'*sm_len + ' ' + '-'*w_len + ' ' + '-'*d_len + '\n' output = '\n'.join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay)) result = headers + boarders + output return result def __getattr__(self, attr): if attr == 'distance': dist = sli_func('Distance', self._datum) super().__setattr__(attr, dist) return self.distance return self.get(attr) def __setattr__(self, attr, value): # `_datum` is the only property of SynapseCollection that should not be # interpreted as a property of the model if attr == '_datum' or 'print_full': super().__setattr__(attr, value) else: self.set({attr: value}) def sources(self): """Returns iterator containing the source node IDs of the `SynapseCollection`.""" sources = self.get('source') if not isinstance(sources, (list, tuple)): sources = (sources,) return iter(sources) def targets(self): """Returns iterator containing the target node IDs of the `SynapseCollection`.""" targets = self.get('target') if not isinstance(targets, (list, tuple)): targets = (targets,) return iter(targets) def get(self, keys=None, output=''): """ Return a parameter dictionary of the connections. If `keys` is a string, a list of values is returned, unless we have a single connection, in which case the single value is returned. `keys` may also be a list, in which case a dictionary with a list of values is returned. Parameters ---------- keys : str or list, optional String or a list of strings naming model properties. get then returns a single value or a dictionary with lists of values belonging to the given `keys`. output : str, ['pandas','json'], optional If the returned data should be in a Pandas DataFrame or in a JSON serializable format. Returns ------- dict: All parameters, or, if keys is a list of strings, a dictionary with lists of corresponding parameters type: If keys is a string, the corresponding parameter(s) is returned Raises ------ TypeError If input params are of the wrong form. KeyError If the specified parameter does not exist for the connections. See Also -------- set Examples -------- >>> conns.get() {'delay': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], ... 
'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]} >>> conns.get('weight') [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] >>> conns[0].get('weight') 1.0 >>> nodes.get(['source', 'weight']) {'source': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]} """ pandas_output = output == 'pandas' if pandas_output and not HAVE_PANDAS: raise ImportError('Pandas could not be imported') # Return empty tuple if we have no connections or if we have done a nest.ResetKernel() num_conn = GetKernelStatus('num_connections') if self.__len__() == 0 or num_conn == 0: return () if keys is None: cmd = 'GetStatus' elif is_literal(keys): # Extracting the correct values will be done in restructure_data below cmd = 'GetStatus' elif is_iterable(keys): keys_str = " ".join("/{0}".format(x) for x in keys) cmd = 'GetStatus {{ [ [ {0} ] ] get }} Map'.format(keys_str) else: raise TypeError("keys should be either a string or an iterable") sps(self._datum) sr(cmd) result = spp() # Need to restructure the data. final_result = restructure_data(result, keys) if pandas_output: index = (self.get('source') if self.__len__() > 1 else (self.get('source'),)) if is_literal(keys): final_result = {keys: final_result} final_result = pandas.DataFrame(final_result, index=index) elif output == 'json': final_result = to_json(final_result) return final_result def set(self, params=None, **kwargs): """ Set the parameters of the connections to `params`. NB! This is almost the same implementation as SetStatus If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values can be single values or list of the same size as the `SynapseCollection`. Parameters ---------- params : str or dict or list Dictionary of parameters (either lists or single values) or list of dictionaries of parameters of same length as `SynapseCollection`. kwargs : keyword argument pairs Named arguments of parameters of the elements in the `SynapseCollection`. Raises ------ TypeError If input params are of the wrong form. KeyError If the specified parameter does not exist for the connections. See Also -------- get """ # This was added to ensure that the function is a nop (instead of, # for instance, raising an exception) when applied to an empty # SynapseCollection, or after having done a nest.ResetKernel(). if self.__len__() == 0 or GetKernelStatus()['network_size'] == 0: return if (isinstance(params, (list, tuple)) and self.__len__() != len(params)): raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__())) if kwargs and params is None: params = kwargs elif kwargs and params: raise TypeError("must either provide params or kwargs, but not both.") if isinstance(params, dict): node_params = self[0].get() contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for key, vals in params.items()] if any(contains_list): temp_param = [{} for _ in range(self.__len__())] for key, vals in params.items(): if not is_iterable(vals): for temp_dict in temp_param: temp_dict[key] = vals else: for i, temp_dict in enumerate(temp_param): temp_dict[key] = vals[i] params = temp_param params = broadcast(params, self.__len__(), (dict,), "params") sps(self._datum) sps(params) sr('2 arraystore') sr('Transpose { arrayload pop SetStatus } forall') class CollocatedSynapses(object): """ Class for collocated synapse specifications. Wrapper around a list of specifications, used when calling :py:func:`.Connect`. 
    Example
    -------
    ::

        nodes = nest.Create('iaf_psc_alpha', 3)

        syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5},
                                           {'synapse_model': 'stdp_synapse'},
                                           {'synapse_model': 'stdp_synapse', 'alpha': 3.})
        nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec)

        conns = nest.GetConnections()

        print(conns.alpha)
        print(len(syn_spec))
    """

    def __init__(self, *args):
        self.syn_specs = args

    def __len__(self):
        return len(self.syn_specs)


class Mask(object):
    """
    Class for spatial masks.

    Masks are used when creating connections between nodes with spatial
    extent. A mask describes the area of the pool population that shall be
    searched to find nodes to connect to for any given node in the driver
    population. Masks are created using the :py:func:`.CreateMask` command.
    """

    _datum = None

    # The constructor should not be called by the user
    def __init__(self, datum):
        """Masks must be created using the CreateMask command."""
        if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype":
            raise TypeError("expected mask Datum")
        self._datum = datum

    # Generic binary operation
    def _binop(self, op, other):
        if not isinstance(other, Mask):
            raise NotImplementedError()
        return sli_func(op, self._datum, other._datum)

    def __or__(self, other):
        return self._binop("or", other)

    def __and__(self, other):
        return self._binop("and", other)

    def __sub__(self, other):
        return self._binop("sub", other)

    def Inside(self, point):
        """
        Test if a point is inside a mask.

        Parameters
        ----------
        point : tuple/list of float values
            Coordinate of the point

        Returns
        -------
        out : bool
            True if the point is inside the mask, False otherwise
        """
        return sli_func("Inside", point, self._datum)


class Parameter(object):
    """
    Class for parameters.

    A parameter may be used as a probability kernel when creating
    connections and nodes, or as synaptic parameters (such as weight and delay).
    Parameters are created using the :py:func:`.CreateParameter` command.
""" _datum = None # The constructor should not be called by the user def __init__(self, datum): """Parameters must be created using the CreateParameter command.""" if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "parametertype": raise TypeError("expected parameter datum") self._datum = datum # Generic binary operation def _binop(self, op, other, params=None): if isinstance(other, (int, float)): other = CreateParameter('constant', {'value': float(other)}) if not isinstance(other, Parameter): raise NotImplementedError() if params is None: return sli_func(op, self._datum, other._datum) else: return sli_func(op, self._datum, other._datum, params) def __add__(self, other): return self._binop("add", other) def __radd__(self, other): return self + other def __sub__(self, other): return self._binop("sub", other) def __rsub__(self, other): return self * (-1) + other def __neg__(self): return self * (-1) def __mul__(self, other): return self._binop("mul", other) def __rmul__(self, other): return self * other def __div__(self, other): return self._binop("div", other) def __truediv__(self, other): return self._binop("div", other) def __pow__(self, exponent): return sli_func("pow", self._datum, float(exponent)) def __lt__(self, other): return self._binop("compare", other, {'comparator': 0}) def __le__(self, other): return self._binop("compare", other, {'comparator': 1}) def __eq__(self, other): return self._binop("compare", other, {'comparator': 2}) def __ne__(self, other): return self._binop("compare", other, {'comparator': 3}) def __ge__(self, other): return self._binop("compare", other, {'comparator': 4}) def __gt__(self, other): return self._binop("compare", other, {'comparator': 5}) def GetValue(self): """ Compute value of parameter. Returns ------- out : value The value of the parameter See also -------- CreateParameter Example ------- :: import nest # normal distribution parameter P = nest.CreateParameter('normal', {'mean': 0.0, 'sigma': 1.0}) # get out value P.GetValue() """ return sli_func("GetValue", self._datum) def is_spatial(self): return sli_func('ParameterIsSpatial', self._datum) def apply(self, spatial_nc, positions=None): if positions is None: return sli_func('Apply', self._datum, spatial_nc) else: if len(spatial_nc) != 1: raise ValueError('The NodeCollection must contain a single node ID only') if not isinstance(positions, (list, tuple)): raise TypeError('Positions must be a list or tuple of positions') for pos in positions: if not isinstance(pos, (list, tuple, numpy.ndarray)): raise TypeError('Each position must be a list or tuple') if len(pos) != len(positions[0]): raise ValueError('All positions must have the same number of dimensions') return sli_func('Apply', self._datum, {'source': spatial_nc, 'targets': positions}) def serializable(data): """Make data serializable for JSON. Parameters ---------- data : any Returns ------- data_serialized : str, int, float, list, dict Data can be encoded to JSON """ if isinstance(data, (numpy.ndarray, NodeCollection)): return data.tolist() if isinstance(data, SynapseCollection): # Get full information from SynapseCollection return serializable(data.get()) if isinstance(data, kernel.SLILiteral): # Get name of SLILiteral. return data.name if isinstance(data, (list, tuple)): return [serializable(d) for d in data] if isinstance(data, dict): return dict([(key, serializable(value)) for key, value in data.items()]) return data def to_json(data, **kwargs): """Serialize data to JSON. 
    Parameters
    ----------
    data : any
    kwargs : keyword argument pairs
        Keyword arguments passed on to the `json.dumps` function.

    Returns
    -------
    data_json : str
        JSON representation of the data
    """
    data_serialized = serializable(data)
    data_json = json.dumps(data_serialized, **kwargs)

    return data_json
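

# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the original module). It
# assumes a working NEST installation importable as ``nest``; the tiny
# network below is purely illustrative of the SynapseCollection get/set API
# and the to_json helper defined above.
if __name__ == '__main__':
    import nest

    nest.ResetKernel()
    neurons = nest.Create('iaf_psc_alpha', 3)
    nest.Connect(neurons, neurons, 'one_to_one')

    conns = nest.GetConnections()
    conns.set(weight=2.0)            # broadcast a scalar to every connection
    print(conns.get('weight'))       # -> [2.0, 2.0, 2.0]
    print(to_json(conns.get(['source', 'target']), indent=2))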
jakobj/nest-simulator
pynest/nest/lib/hl_api_types.py
Python
gpl-2.0
38,514
[ "Gaussian" ]
887d4df7e242592f3dd263ecbe01fdfef6b21c0bd79c6c52c72b110801638aa5
import caffe
import numpy as np
import argparse, pprint
import scipy.misc as scm
from os import path as osp
from easydict import EasyDict as edict
import time
import glog
import pdb
import pickle
import matplotlib.pyplot as plt
import copy

class GaussRenderLayer(caffe.Layer):
    @classmethod
    def parse_args(cls, argsStr):
        parser = argparse.ArgumentParser(description='GaussRender Layer')
        parser.add_argument('--K', default=100.0, type=float)
        parser.add_argument('--T', default=-50.0, type=float)
        parser.add_argument('--sigma', default=0.001, type=float)
        parser.add_argument('--imgSz', default=224, type=int)
        args = parser.parse_args(argsStr.split())
        print('Using Config:')
        pprint.pprint(args)
        return args

    def setup(self, bottom, top):
        self.param_ = GaussRenderLayer.parse_args(self.param_str)
        assert len(bottom) == 1, 'There should be 1 bottom blob'
        kpShape = bottom[0].data.shape
        batchSz, numKp, numCoords, _ = kpShape
        assert numCoords == 2, 'Keypoints are defined by 2-D coordinates'
        self.batchSz_ = batchSz
        self.nKp_ = numKp
        assert len(top) == 1, 'There should be only one output blob'
        top[0].reshape(self.batchSz_, self.nKp_, self.param_.imgSz, self.param_.imgSz)
        # Form the Gaussian window; it is (2*imgSz + 1) x (2*imgSz + 1) so that
        # a crop of it can cover the output image wherever the keypoint lies
        x = np.linspace(-self.param_.imgSz, self.param_.imgSz, 2 * self.param_.imgSz + 1)
        y = np.linspace(-self.param_.imgSz, self.param_.imgSz, 2 * self.param_.imgSz + 1)
        xx, yy = np.meshgrid(x, y)
        dist = xx * xx + yy * yy
        self.g_ = ((self.param_.K * np.exp(-self.param_.sigma * dist)) + self.param_.T).astype(np.float32)

    def forward(self, bottom, top):
        # For each keypoint, fill its channel with the background value T and
        # paste in the crop of the Gaussian window centered on the keypoint
        for b in range(self.batchSz_):
            kps = bottom[0].data[b]
            top[0].data[b][...] = self.param_.T
            for k in range(self.nKp_):
                x, y = kps[k]
                x, y = int(round(x + 1)), int(round(y + 1))
                # Center the crop within the Gaussian window
                delY, delX = self.param_.imgSz - y, self.param_.imgSz - x
                xSt, ySt = max(0, delX), max(0, delY)
                yEn, xEn = delY + self.param_.imgSz, delX + self.param_.imgSz
                yEn = min(2 * self.param_.imgSz + 1, yEn)
                xEn = min(2 * self.param_.imgSz + 1, xEn)
                yImSt, xImSt = max(0, y - self.param_.imgSz), max(0, x - self.param_.imgSz)
                yImEn, xImEn = yImSt + (yEn - ySt), xImSt + (xEn - xSt)
                top[0].data[b][k, yImSt:yImEn, xImSt:xImEn] = copy.deepcopy(self.g_[ySt:yEn, xSt:xEn])

    def backward(self, top, propagate_down, bottom):
        pass

    def reshape(self, bottom, top):
        pass


def test_render_layer(x=225, y=250):
    net = caffe.Net('test/gauss_render.prototxt', caffe.TEST)
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    pos = np.zeros((1, 1, 2, 1)).astype(np.float32)
    pos[0, 0, 0] = x
    pos[0, 0, 1] = y
    data = net.forward(blobs=['gauss'], **{'kp': pos})
    print(data['gauss'][0].transpose((1, 2, 0)).shape)
    ax.imshow((data['gauss'][0].transpose((1, 2, 0)).squeeze() + 50.0).astype(np.uint8))
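

# ---------------------------------------------------------------------------
# Hedged standalone sketch (an addition, not part of the original file): it
# re-implements the forward pass for a single keypoint in plain numpy so the
# window-cropping arithmetic above can be sanity-checked without a caffe
# build. The helper name is hypothetical; the defaults mirror parse_args.
def render_gauss_numpy(x, y, imgSz=224, K=100.0, T=-50.0, sigma=0.001):
    pts = np.linspace(-imgSz, imgSz, 2 * imgSz + 1)
    xx, yy = np.meshgrid(pts, pts)
    g = (K * np.exp(-sigma * (xx * xx + yy * yy)) + T).astype(np.float32)
    out = np.full((imgSz, imgSz), T, dtype=np.float32)
    x, y = int(round(x + 1)), int(round(y + 1))
    delY, delX = imgSz - y, imgSz - x
    ySt, xSt = max(0, delY), max(0, delX)
    yEn = min(2 * imgSz + 1, delY + imgSz)
    xEn = min(2 * imgSz + 1, delX + imgSz)
    yImSt, xImSt = max(0, y - imgSz), max(0, x - imgSz)
    out[yImSt:yImSt + (yEn - ySt), xImSt:xImSt + (xEn - xSt)] = g[ySt:yEn, xSt:xEn]
    return out  # peaks at roughly K + T at pixel (y, x) when the keypoint is in view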
pulkitag/caffe-python-layers
python_ief.py
Python
bsd-3-clause
2,878
[ "Gaussian" ]
eca6867ef45268215f653d15ed1d7256e9f82c8bdb3367912b19eaa9f682805b
# -*- coding: utf-8 -*- # Copyright (c) 2015-2018, Exa Analytics Development Team # Distributed under the terms of the Apache License 2.0 import numpy as np import pandas as pd from unittest import TestCase from exatomic.base import resource from exatomic.adf.output import Output class TestADFOutput(TestCase): """Test the ADF output file editor.""" def setUp(self): self.lu = Output(resource('adf-lu.out')) # TODO :: File with excitation def test_parse_atom(self): self.lu.parse_atom() self.assertEqual(self.lu.atom.shape[0], 1) self.assertTrue(np.all(pd.notnull(self.lu.atom))) def test_parse_basis_set(self): self.lu.parse_basis_set() self.assertEqual(self.lu.basis_set.shape[0], 32) self.assertTrue(np.all(pd.notnull(self.lu.basis_set))) def test_parse_basis_set_order(self): self.lu.parse_basis_set_order() self.assertEqual(self.lu.basis_set_order.shape[0], 109) self.assertTrue(np.all(pd.notnull(self.lu.basis_set_order))) def test_parse_momatrix_and_to_universe(self): self.lu.parse_momatrix() uni = self.lu.to_universe() self.assertEqual(self.lu.momatrix.shape[0], uni.basis_dims['ncc'] * uni.basis_dims['ncs']) def test_parse_contribution(self): self.lu.parse_contribution() self.assertEqual(self.lu.contribution.shape[0], 78) self.assertTrue(np.all(pd.notnull(self.lu.contribution))) def test_parse_orbital(self): self.lu.parse_orbital() self.assertEqual(self.lu.orbital.shape[0], 20) self.assertTrue(np.all(pd.notnull(self.lu.orbital)))
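

# Hedged convenience entry point (an addition, not part of the original file):
# it lets the test module be run directly, e.g. ``python test_output.py``, in
# addition to discovery via a test runner.
if __name__ == '__main__':
    from unittest import main
    main()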
avmarchenko/exatomic
exatomic/adf/tests/test_output.py
Python
apache-2.0
1,700
[ "ADF" ]
e4957e71e6e4a459ebbdef103f886ca0396e32197a260aa91e3fa90c68fdafb1